// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;

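/*
 * Put the filesystem into checkpoint-error state: disable fault injection,
 * set CP_ERROR_FLAG in the checkpoint flags and, unless called from an
 * end_io path, flush the merged write bios as well.
 */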
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	f2fs_build_fault_attr(sbi, 0, 0);
	set_ckpt_flags(sbi, CP_ERROR_FLAG);
	if (!end_io)
		f2fs_flush_merged_writes(sbi);
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}

/*
 * Read a meta page; unlike f2fs_grab_meta_page(), this can fail and
 * returns an ERR_PTR when the read does not succeed.
 */
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = REQ_META | REQ_PRIO,
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
		.is_meta = is_meta,
	};
	int err;

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	fio.page = page;

	err = f2fs_submit_page_bio(&fio);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
out:
	return page;
}

struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}

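/*
 * Like f2fs_get_meta_page(), but retry the read up to
 * DEFAULT_RETRY_IO_COUNT times on -EIO before declaring the checkpoint
 * broken via f2fs_stop_checkpoint().
 */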
struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	int count = 0;

retry:
	page = __get_meta_page(sbi, index, true);
	if (IS_ERR(page)) {
		if (PTR_ERR(page) == -EIO &&
				++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false);
	}
	return page;
}

/* for POR only */
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}

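/*
 * Check that @blkaddr lies inside the on-disk region implied by @type:
 * SIT/SSA/CP addresses must fall in their metadata areas, while META_POR
 * and DATA_GENERIC addresses must fall in the main area. Returns false
 * (with a warning for DATA_GENERIC) for an out-of-range address.
 */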
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
			return false;
		break;
	case META_SSA:
		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr))
			return false;
		break;
	case META_CP:
		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi)))
			return false;
		break;
	case META_POR:
	case DATA_GENERIC:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi))) {
			if (type == DATA_GENERIC) {
				f2fs_msg(sbi->sb, KERN_WARNING,
					"access invalid blkaddr:%u", blkaddr);
				WARN_ON(1);
			}
			return false;
		}
		break;
	case META_GENERIC:
		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
			blkaddr >= MAIN_BLKADDR(sbi)))
			return false;
		break;
	default:
		BUG();
	}

	return true;
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
		.in_list = false,
		.is_meta = (type != META_POR),
	};
	struct blk_plug plug;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		f2fs_submit_page_bio(&fio);
		f2fs_put_page(page, 0);
	}
out:
	blk_finish_plug(&plug);
	return blkno - start;
}

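/*
 * Trigger readahead of up to BIO_MAX_PAGES POR blocks starting at @index,
 * but only when the first page is not already cached and uptodate.
 */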
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	bool readahead = false;

	page = find_get_page(META_MAPPING(sbi), index);
	if (!page || !PageUptodate(page))
		readahead = true;
	f2fs_put_page(page, 0);

	if (readahead)
		f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}

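/*
 * Write one dirty meta page back to disk; the page is redirtied instead
 * when a checkpoint error is pending, recovery (POR) is in progress, or
 * reclaim asks for a page below the first summary block.
 */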
static int __f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;

	f2fs_do_write_meta_page(sbi, page, io_type);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
		f2fs_submit_merged_write_cond(sbi, page->mapping->host,
						0, page->index, META);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_write(sbi, META);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff, written;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* collect a number of dirty meta pages and write together */
	if (wbc->for_kupdate ||
		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
		goto skip_write;
	/* if taking the lock fails, cp will flush the dirty pages instead */
	if (!mutex_trylock(&sbi->cp_mutex))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}

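/*
 * Walk the dirty pages of the meta mapping in index order and write them
 * out, stopping once @nr_to_write pages are written or, for a bounded
 * @nr_to_write, once the run of indices would become discontiguous;
 * returns the number of pages written.
 */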
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
				long nr_to_write, enum iostat_type io_type)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	int nr_pages;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec);

	blk_start_plug(&plug);

	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_write(sbi, type);

	blk_finish_plug(&plug);

	return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, META);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage = f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

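/*
 * Insert @ino into the inode management structure of @type. The slab
 * entry and the radix-tree node are preallocated outside ino_lock, so
 * the insertion itself cannot fail; for FLUSH_INO entries the dirty bit
 * of @devidx is also set.
 */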
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
						unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
			f2fs_bug_on(sbi, 1);

		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}

	if (type == FLUSH_INO)
		f2fs_set_bit(devidx, (char *)&e->dirty_device);

	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&im->ino_root, ino);
		im->ino_num--;
		spin_unlock(&im->ino_lock);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&im->ino_lock);
}

void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, 0, type);
}

void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO or UPDATE_INO */
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);
	return e ? true : false;
}

void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}

void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	__add_ino_entry(sbi, ino, devidx, type);
}

bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;
	bool is_dirty = false;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
		is_dirty = true;
	spin_unlock(&im->ino_lock);
	return is_dirty;
}

int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		f2fs_show_injection_info(FAULT_ORPHAN);
		return -ENOSPC;
	}

	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}

void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}

void f2fs_add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
	f2fs_update_inode_page(inode);
}

void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}

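/*
 * Drop one orphan inode recorded in the checkpoint pack: iget it, clear
 * its link count, and let iput() truncate and free it. If its node
 * address is still valid afterwards, the orphan was not erased, so flag
 * the filesystem for fsck.
 */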
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * It is a bug if we cannot find the inode referenced
		 * by an orphan entry; -ENOENT here means the orphan
		 * list points at a missing inode.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	err = dquot_initialize(inode);
	if (err) {
		iput(inode);
		goto err_out;
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	err = f2fs_get_node_info(sbi, ino, &ni);
	if (err)
		goto err_out;

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		err = -EIO;
		goto err_out;
	}
	return 0;

err_out:
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: orphan failed (ino=%x), run fsck to fix.",
			__func__, ino);
	return err;
}

int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	unsigned int s_flags = sbi->sb->s_flags;
	int err = 0;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page;
		struct f2fs_orphan_block *orphan_blk;

		page = f2fs_get_meta_page(sbi, start_blk + i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto out;
		}

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				goto out;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
	set_sbi_flag(sbi, SBI_IS_RECOVERED);

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return err;
}

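/*
 * Write the in-memory orphan inode list into the orphan blocks of the
 * current checkpoint pack, packing F2FS_ORPHANS_PER_BLOCK entries into
 * each block starting from @start_blk.
 */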
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop over each orphan inode entry and write them into journal blocks */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = f2fs_grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * once an orphan block is full of 1020 entries,
			 * we need to flush the current orphan block
			 * and bring another one into memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

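/*
 * Read one checkpoint header/footer block at @cp_addr, validate its crc
 * offset and checksum, and return the page, the checkpoint struct and
 * the stored version number through the output parameters.
 */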
static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
		struct f2fs_checkpoint **cp_block, struct page **cp_page,
		unsigned long long *version)
{
	unsigned long blk_size = sbi->blocksize;
	size_t crc_offset = 0;
	__u32 crc = 0;

	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
	if (IS_ERR(*cp_page))
		return PTR_ERR(*cp_page);

	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);

	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
	if (crc_offset > (blk_size - sizeof(__le32))) {
		f2fs_put_page(*cp_page, 1);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"invalid crc_offset: %zu", crc_offset);
		return -EINVAL;
	}

	crc = cur_cp_crc(*cp_block);
	if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
		f2fs_put_page(*cp_page, 1);
		f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
		return -EINVAL;
	}

	*version = cur_cp_version(*cp_block);
	return 0;
}

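/*
 * A checkpoint pack is valid only when the version numbers stored in its
 * first and last blocks match; return the header page of a valid pack,
 * or NULL if either block is unreadable or the versions disagree.
 */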
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
	struct f2fs_checkpoint *cp_block = NULL;
	unsigned long long cur_version = 0, pre_version = 0;
	int err;

	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_1, version);
	if (err)
		return NULL;

	if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
					sbi->blocks_per_seg) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"invalid cp_pack_total_block_count:%u",
			le32_to_cpu(cp_block->cp_pack_total_block_count));
		goto invalid_cp;
	}
	pre_version = *version;

	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_2, version);
	if (err)
		goto invalid_cp;
	cur_version = *version;

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
	f2fs_put_page(cp_page_2, 1);
invalid_cp:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}

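/*
 * Load both checkpoint packs, pick the valid one with the newer version,
 * and copy its checkpoint block (plus any payload blocks) into sbi->ckpt.
 */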
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;

	sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
				 GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2).
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	if (cur_page == cp1)
		sbi->cur_cp_pack = 1;
	else
		sbi->cur_cp_pack = 2;

	/* Sanity checking of checkpoint */
	if (f2fs_sanity_check_ckpt(sbi))
		goto free_fail_no_cp;

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
		if (IS_ERR(cur_page))
			goto free_fail_no_cp;
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

free_fail_no_cp:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}

static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	if (!f2fs_is_volatile_file(inode))
		list_add_tail(&F2FS_I(inode)->dirty_list,
						&sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void f2fs_update_dirty_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	SetPagePrivate(page);
	f2fs_trace_pid(page);
}

void f2fs_remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}

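/*
 * Flush dirty pages of every inode on the dirty list of @type, one inode
 * at a time, until the list drains or a checkpoint error is detected.
 */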
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);
	unsigned long ino = 0;

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		unsigned long cur_ino = inode->i_ino;

		if (is_dir)
			F2FS_I(inode)->cp_task = current;

		filemap_fdatawrite(inode->i_mapping);

		if (is_dir)
			F2FS_I(inode)->cp_task = NULL;

		iput(inode);
		/* We need to give the cpu to other writers. */
		if (ino == cur_ino)
			cond_resched();
		else
			ino = cur_ino;
	} else {
		/*
		 * We should submit the bio, since there exist several
		 * dentry pages under writeback in the freeing inode.
		 */
		f2fs_submit_merged_write(sbi, DATA);
		cond_resched();
	}
	goto retry;
}

int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

	while (total--) {
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		if (list_empty(head)) {
			spin_unlock(&sbi->inode_lock[DIRTY_META]);
			return 0;
		}
		fi = list_first_entry(head, struct f2fs_inode_info,
							gdirty_list);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		if (inode) {
			sync_inode_metadata(inode, 0);

			/* it's on eviction */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE))
				f2fs_update_inode_page(inode);
			iput(inode);
		}
	}
	return 0;
}

static void __prepare_cp_block(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	nid_t last_nid = nm_i->next_scan_nid;

	next_free_nid(sbi, &last_nid);
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;
	int err = 0;

	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			goto out;
		cond_resched();
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush. inode->i_blocks can be updated.
	 */
	down_write(&sbi->node_change);

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		up_write(&sbi->node_change);
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			goto out;
		cond_resched();
		goto retry_flush_dents;
	}

retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		atomic_inc(&sbi->wb_sync_req[NODE]);
		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		atomic_dec(&sbi->wb_sync_req[NODE]);
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
			goto out;
		}
		cond_resched();
		goto retry_flush_nodes;
	}

	/*
	 * sbi->node_change is used only for AIO write_begin path which produces
	 * dirty node blocks and some checkpoint values by block allocation.
	 */
	__prepare_cp_block(sbi);
	up_write(&sbi->node_change);
out:
	blk_finish_plug(&plug);
	return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

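/*
 * Wait until all writeback counted in F2FS_WB_CP_DATA has completed,
 * rechecking every five seconds, and bail out early when the checkpoint
 * has already failed.
 */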
void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!get_pages(sbi, F2FS_WB_CP_DATA))
			break;

		if (unlikely(f2fs_cp_error(sbi)))
			break;

		io_schedule_timeout(5*HZ);
	}
	finish_wait(&sbi->cp_wait, &wait);
}

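/*
 * Fold the reasons recorded in @cpc and the current orphan/fsck state
 * into the checkpoint flags under cp_lock; the nat_bits are dropped when
 * an umount checkpoint pack leaves no room to store them.
 */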
Jaegeuk Kime4c5d842016-09-30 17:37:43 -07001178static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1179{
1180 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
1181 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Chao Yud1aa2452017-07-07 14:10:15 +08001182 unsigned long flags;
Jaegeuk Kime4c5d842016-09-30 17:37:43 -07001183
Chao Yud1aa2452017-07-07 14:10:15 +08001184 spin_lock_irqsave(&sbi->cp_lock, flags);
Jaegeuk Kime4c5d842016-09-30 17:37:43 -07001185
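	/*
	 * On umount, nat_bits must be dropped if the CP pack has grown so
	 * large that it would overlap the nat_bits blocks written at the end
	 * of the checkpoint segment.
	 */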
Chao Yuc473f1a2017-04-27 20:40:39 +08001186 if ((cpc->reason & CP_UMOUNT) &&
Kinglong Mee10047f52017-03-11 21:18:01 +08001187 le32_to_cpu(ckpt->cp_pack_total_block_count) >
Jaegeuk Kim22ad0b62017-02-09 10:38:09 -08001188 sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
1189 disable_nat_bits(sbi, false);
1190
Chao Yu1f43e2a2017-04-28 13:56:08 +08001191 if (cpc->reason & CP_TRIMMED)
1192 __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
Chao Yucd36d7a2018-01-31 09:30:34 +08001193 else
1194 __clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
Chao Yu1f43e2a2017-04-28 13:56:08 +08001195
Chao Yuc473f1a2017-04-27 20:40:39 +08001196 if (cpc->reason & CP_UMOUNT)
Jaegeuk Kime4c5d842016-09-30 17:37:43 -07001197 __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
1198 else
1199 __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
1200
Chao Yuc473f1a2017-04-27 20:40:39 +08001201 if (cpc->reason & CP_FASTBOOT)
Jaegeuk Kime4c5d842016-09-30 17:37:43 -07001202 __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
1203 else
1204 __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
1205
1206 if (orphan_num)
1207 __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
1208 else
1209 __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
1210
1211 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1212 __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
1213
1214 /* set this flag to activate crc|cp_ver for recovery */
1215 __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
Jaegeuk Kimf2367922018-01-19 13:42:33 -08001216 __clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);
Jaegeuk Kime4c5d842016-09-30 17:37:43 -07001217
Chao Yud1aa2452017-07-07 14:10:15 +08001218 spin_unlock_irqrestore(&sbi->cp_lock, flags);
Jaegeuk Kime4c5d842016-09-30 17:37:43 -07001219}
1220
Gao Xiang46706d52018-02-10 12:12:51 +08001221static void commit_checkpoint(struct f2fs_sb_info *sbi,
1222 void *src, block_t blk_addr)
1223{
1224 struct writeback_control wbc = {
1225 .for_reclaim = 0,
1226 };
1227
 1228	/*
 1229	 * Calling pagevec_lookup_tag and lock_page again would take
Chao Yu4d57b862018-05-30 00:20:41 +08001230	 * some extra time, so f2fs_update_meta_pages and
 1231	 * f2fs_sync_meta_pages are combined in this function.
Gao Xiang46706d52018-02-10 12:12:51 +08001232	 */
Chao Yu4d57b862018-05-30 00:20:41 +08001233 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
Gao Xiang46706d52018-02-10 12:12:51 +08001234 int err;
1235
1236 memcpy(page_address(page), src, PAGE_SIZE);
1237 set_page_dirty(page);
1238
1239 f2fs_wait_on_page_writeback(page, META, true);
1240 f2fs_bug_on(sbi, PageWriteback(page));
1241 if (unlikely(!clear_page_dirty_for_io(page)))
1242 f2fs_bug_on(sbi, 1);
1243
1244 /* writeout cp pack 2 page */
1245 err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
Jaegeuk Kimaf697c02018-07-11 18:30:42 -07001246 if (unlikely(err && f2fs_cp_error(sbi))) {
1247 f2fs_put_page(page, 1);
1248 return;
1249 }
Gao Xiang46706d52018-02-10 12:12:51 +08001250
Jaegeuk Kimaf697c02018-07-11 18:30:42 -07001251 f2fs_bug_on(sbi, err);
Gao Xiang46706d52018-02-10 12:12:51 +08001252 f2fs_put_page(page, 0);
1253
1254 /* submit checkpoint (with barrier if NOBARRIER is not set) */
1255 f2fs_submit_merged_write(sbi, META_FLUSH);
1256}
1257
Chao Yuc34f42e2015-12-23 17:50:30 +08001258static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001259{
1260 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
Huang Ying77041822014-09-12 20:19:48 +08001261 struct f2fs_nm_info *nm_i = NM_I(sbi);
Chao Yud1aa2452017-07-07 14:10:15 +08001262 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001263 block_t start_blk;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001264 unsigned int data_sum_blocks, orphan_blocks;
Jaegeuk Kim7e586fa2013-06-19 20:47:19 +09001265 __u32 crc32 = 0;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001266 int i;
Wanpeng Li55141482015-02-26 07:57:20 +08001267 int cp_payload_blks = __cp_payload(sbi);
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08001268 struct super_block *sb = sbi->sb;
1269 struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
1270 u64 kbytes_written;
Chao Yu1228b482017-09-29 13:59:39 +08001271 int err;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001272
1273 /* Flush all the NAT/SIT pages */
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001274 while (get_pages(sbi, F2FS_DIRTY_META)) {
Chao Yu4d57b862018-05-30 00:20:41 +08001275 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001276 if (unlikely(f2fs_cp_error(sbi)))
Jaegeuk Kimaf697c02018-07-11 18:30:42 -07001277 break;
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001278 }
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001279
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001280 /*
1281 * modify checkpoint
1282 * version number is already updated
1283 */
Chao Yua1f72ac22018-06-04 23:20:17 +08001284 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001285 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
Chao Yub5b82202014-08-22 16:17:38 +08001286 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001287 ckpt->cur_node_segno[i] =
1288 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
1289 ckpt->cur_node_blkoff[i] =
1290 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
1291 ckpt->alloc_type[i + CURSEG_HOT_NODE] =
1292 curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
1293 }
Chao Yub5b82202014-08-22 16:17:38 +08001294 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001295 ckpt->cur_data_segno[i] =
1296 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
1297 ckpt->cur_data_blkoff[i] =
1298 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
1299 ckpt->alloc_type[i + CURSEG_HOT_DATA] =
1300 curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
1301 }
1302
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001303 /* 2 cp + n data seg summary + orphan inode blocks */
Chao Yu4d57b862018-05-30 00:20:41 +08001304 data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
Chao Yud1aa2452017-07-07 14:10:15 +08001305 spin_lock_irqsave(&sbi->cp_lock, flags);
Chao Yub5b82202014-08-22 16:17:38 +08001306 if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
Chao Yuaaec2b12016-09-20 11:04:18 +08001307 __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001308 else
Chao Yuaaec2b12016-09-20 11:04:18 +08001309 __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
Chao Yud1aa2452017-07-07 14:10:15 +08001310 spin_unlock_irqrestore(&sbi->cp_lock, flags);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001311
Chao Yu67298802014-11-18 11:18:36 +08001312 orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
Changman Lee1dbe4152014-05-12 12:27:43 +09001313 ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
1314 orphan_blocks);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001315
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001316 if (__remain_node_summaries(cpc->reason))
Chao Yub5b82202014-08-22 16:17:38 +08001317 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+
Changman Lee1dbe4152014-05-12 12:27:43 +09001318 cp_payload_blks + data_sum_blocks +
1319 orphan_blocks + NR_CURSEG_NODE_TYPE);
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001320 else
Chao Yub5b82202014-08-22 16:17:38 +08001321 ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
Changman Lee1dbe4152014-05-12 12:27:43 +09001322 cp_payload_blks + data_sum_blocks +
1323 orphan_blocks);
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001324
Jaegeuk Kime4c5d842016-09-30 17:37:43 -07001325 /* update ckpt flag for checkpoint */
1326 update_ckpt_flags(sbi, cpc);
Jaegeuk Kima468f0e2016-09-19 17:55:10 -07001327
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001328 /* update SIT/NAT bitmap */
1329 get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
1330 get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
1331
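	/*
	 * The CRC covers the checkpoint up to checksum_offset and is stored
	 * at that offset.
	 */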
Keith Mok43b65732016-03-02 12:04:24 -08001332 crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
Jaegeuk Kim7e586fa2013-06-19 20:47:19 +09001333 *((__le32 *)((unsigned char *)ckpt +
1334 le32_to_cpu(ckpt->checksum_offset)))
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001335 = cpu_to_le32(crc32);
1336
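	/*
	 * CP pack layout from start_blk: cp page 1, cp payload blocks,
	 * orphan blocks, data summaries, node summaries (only when
	 * __remain_node_summaries() holds), and finally cp page 2 written
	 * by commit_checkpoint().
	 */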
Jaegeuk Kim8508e442016-11-24 12:45:15 -08001337 start_blk = __start_cp_next_addr(sbi);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001338
Jaegeuk Kim22ad0b62017-02-09 10:38:09 -08001339 /* write nat bits */
1340 if (enabled_nat_bits(sbi, cpc)) {
1341 __u64 cp_ver = cur_cp_version(ckpt);
Jaegeuk Kim22ad0b62017-02-09 10:38:09 -08001342 block_t blk;
1343
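		/* embed this checkpoint's version and CRC at the head of nat_bits */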
1344 cp_ver |= ((__u64)crc32 << 32);
1345 *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
1346
1347 blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
1348 for (i = 0; i < nm_i->nat_bits_blocks; i++)
Chao Yu4d57b862018-05-30 00:20:41 +08001349 f2fs_update_meta_page(sbi, nm_i->nat_bits +
Jaegeuk Kim22ad0b62017-02-09 10:38:09 -08001350 (i << F2FS_BLKSIZE_BITS), blk + i);
1351
1352 /* Flush all the NAT BITS pages */
1353 while (get_pages(sbi, F2FS_DIRTY_META)) {
Chao Yu4d57b862018-05-30 00:20:41 +08001354 f2fs_sync_meta_pages(sbi, META, LONG_MAX,
1355 FS_CP_META_IO);
Jaegeuk Kim22ad0b62017-02-09 10:38:09 -08001356 if (unlikely(f2fs_cp_error(sbi)))
Jaegeuk Kimaf697c02018-07-11 18:30:42 -07001357 break;
Jaegeuk Kim22ad0b62017-02-09 10:38:09 -08001358 }
1359 }
1360
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001361 /* write out checkpoint buffer at block 0 */
Chao Yu4d57b862018-05-30 00:20:41 +08001362 f2fs_update_meta_page(sbi, ckpt, start_blk++);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001363
Chao Yu381722d2015-05-19 17:40:04 +08001364 for (i = 1; i < 1 + cp_payload_blks; i++)
Chao Yu4d57b862018-05-30 00:20:41 +08001365 f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
Chao Yu381722d2015-05-19 17:40:04 +08001366 start_blk++);
Changman Lee1dbe4152014-05-12 12:27:43 +09001367
Chao Yu67298802014-11-18 11:18:36 +08001368 if (orphan_num) {
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001369 write_orphan_inodes(sbi, start_blk);
1370 start_blk += orphan_blocks;
1371 }
1372
Chao Yu4d57b862018-05-30 00:20:41 +08001373 f2fs_write_data_summaries(sbi, start_blk);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001374 start_blk += data_sum_blocks;
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08001375
1376 /* Record write statistics in the hot node summary */
1377 kbytes_written = sbi->kbytes_written;
1378 if (sb->s_bdev->bd_part)
1379 kbytes_written += BD_PART_WRITTEN(sbi);
1380
Chao Yub7ad7512016-02-19 18:08:46 +08001381 seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
Shuoran Liu8f1dbbb2016-01-27 09:57:30 +08001382
Jaegeuk Kim119ee912015-01-29 11:45:33 -08001383 if (__remain_node_summaries(cpc->reason)) {
Chao Yu4d57b862018-05-30 00:20:41 +08001384 f2fs_write_node_summaries(sbi, start_blk);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001385 start_blk += NR_CURSEG_NODE_TYPE;
1386 }
1387
Gao Xiang46706d52018-02-10 12:12:51 +08001388 /* update user_block_counts */
1389 sbi->last_valid_block_count = sbi->total_valid_block_count;
1390 percpu_counter_set(&sbi->alloc_valid_block_count, 0);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001391
Gao Xiang46706d52018-02-10 12:12:51 +08001392	/* At this point one bio holds the whole CP pack except the cp pack 2 page */
Chao Yu4d57b862018-05-30 00:20:41 +08001393 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
Gao Xiang46706d52018-02-10 12:12:51 +08001394
 1395	/* wait for previously submitted meta pages to finish writeback */
Chao Yu50fa53e2018-08-02 23:03:19 +08001396 f2fs_wait_on_all_pages_writeback(sbi);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001397
Gao Xiang46706d52018-02-10 12:12:51 +08001398	/* flush all device caches */
1399 err = f2fs_flush_device_cache(sbi);
1400 if (err)
1401 return err;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001402
Gao Xiang46706d52018-02-10 12:12:51 +08001403	/* flush the checkpoint cp pack 2 page, with a barrier unless NOBARRIER is set */
1404 commit_checkpoint(sbi, ckpt, start_blk);
Chao Yu50fa53e2018-08-02 23:03:19 +08001405 f2fs_wait_on_all_pages_writeback(sbi);
Jaegeuk Kim6a8f8ca2014-10-29 14:37:22 -07001406
Chao Yu18767e62018-07-27 18:15:13 +08001407	/*
 1408	 * invalidate intermediate page cache pages borrowed from the meta
 1409	 * inode, which are used for migration of an encrypted inode's blocks.
 1410	 */
1411 if (f2fs_sb_has_encrypt(sbi->sb))
1412 invalidate_mapping_pages(META_MAPPING(sbi),
1413 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
1414
Chao Yu4d57b862018-05-30 00:20:41 +08001415 f2fs_release_ino_entry(sbi, false);
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001416
Chao Yu50fa53e2018-08-02 23:03:19 +08001417 f2fs_reset_fsync_node_info(sbi);
1418
Chao Yucaf00472015-01-28 17:48:42 +08001419 clear_sbi_flag(sbi, SBI_IS_DIRTY);
Jaegeuk Kimbbf156f72016-08-29 18:23:45 -07001420 clear_sbi_flag(sbi, SBI_NEED_CP);
Jaegeuk Kim8508e442016-11-24 12:45:15 -08001421 __set_cp_next_pack(sbi);
Chao Yuc34f42e2015-12-23 17:50:30 +08001422
Chao Yuc2a080a2016-08-31 10:43:19 +08001423	/*
 1424	 * redirty superblock if metadata such as node pages or the inode cache
 1425	 * was updated while the checkpoint was being written.
 1426	 */
1427 if (get_pages(sbi, F2FS_DIRTY_NODES) ||
1428 get_pages(sbi, F2FS_DIRTY_IMETA))
1429 set_sbi_flag(sbi, SBI_IS_DIRTY);
1430
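	/* dirty dentry pages were already flushed by block_operations() */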
1431 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
1432
Jaegeuk Kimaf697c02018-07-11 18:30:42 -07001433 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001434}
1435
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001436/*
arter97e1c42042014-08-06 23:22:50 +09001437 * We guarantee that this checkpoint procedure will not fail.
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001438 */
Chao Yu4d57b862018-05-30 00:20:41 +08001439int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001440{
1441 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1442 unsigned long long ckpt_ver;
Chao Yuc34f42e2015-12-23 17:50:30 +08001443 int err = 0;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001444
Jaegeuk Kim43727522013-02-04 15:11:17 +09001445 mutex_lock(&sbi->cp_mutex);
Jaegeuk Kim85010172014-08-11 18:37:46 -07001446
Chao Yucaf00472015-01-28 17:48:42 +08001447 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
Chao Yuc473f1a2017-04-27 20:40:39 +08001448 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
1449 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
Jaegeuk Kim85010172014-08-11 18:37:46 -07001450 goto out;
Chao Yuc34f42e2015-12-23 17:50:30 +08001451 if (unlikely(f2fs_cp_error(sbi))) {
1452 err = -EIO;
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001453 goto out;
Chao Yuc34f42e2015-12-23 17:50:30 +08001454 }
1455 if (f2fs_readonly(sbi->sb)) {
1456 err = -EROFS;
Jaegeuk Kim11504a82015-01-23 18:43:45 -08001457 goto out;
Chao Yuc34f42e2015-12-23 17:50:30 +08001458 }
Wanpeng Li2bda5422015-02-27 15:56:16 +08001459
1460 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
1461
Chao Yuc34f42e2015-12-23 17:50:30 +08001462 err = block_operations(sbi);
1463 if (err)
Jaegeuk Kimcf779ca2014-08-11 18:37:46 -07001464 goto out;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001465
Jaegeuk Kim75ab4cb2014-09-20 21:57:51 -07001466 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
Namjae Jeon2af4bd62013-04-23 18:26:54 +09001467
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07001468 f2fs_flush_merged_writes(sbi);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001469
Yunlei He58cce382016-08-18 21:01:19 +08001470 /* this is the case of multiple fstrims without any changes */
Chao Yuc473f1a2017-04-27 20:40:39 +08001471 if (cpc->reason & CP_DISCARD) {
Chao Yu4d57b862018-05-30 00:20:41 +08001472 if (!f2fs_exist_trim_candidates(sbi, cpc)) {
Jaegeuk Kim25290fa2016-12-29 22:06:15 -08001473 unblock_operations(sbi);
1474 goto out;
1475 }
1476
Jaegeuk Kim0333ad42016-12-29 16:58:54 -08001477 if (NM_I(sbi)->dirty_nat_cnt == 0 &&
1478 SIT_I(sbi)->dirty_sentries == 0 &&
1479 prefree_segments(sbi) == 0) {
Chao Yu4d57b862018-05-30 00:20:41 +08001480 f2fs_flush_sit_entries(sbi, cpc);
1481 f2fs_clear_prefree_segments(sbi, cpc);
Jaegeuk Kim0333ad42016-12-29 16:58:54 -08001482 unblock_operations(sbi);
1483 goto out;
1484 }
Yunlei He58cce382016-08-18 21:01:19 +08001485 }
1486
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001487	/*
 1488	 * update checkpoint pack index
 1489	 * Increase the version number so that SIT entries and seg summaries
 1490	 * are written at the correct place.
 1491	 */
Jaegeuk Kimd71b5562013-08-09 15:03:21 +09001492 ckpt_ver = cur_cp_version(ckpt);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001493 ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
1494
1495 /* write cached NAT/SIT entries to NAT/SIT area */
Jaegeuk Kimedc55aa2018-09-17 17:36:06 -07001496 err = f2fs_flush_nat_entries(sbi, cpc);
1497 if (err)
1498 goto stop;
1499
Chao Yu4d57b862018-05-30 00:20:41 +08001500 f2fs_flush_sit_entries(sbi, cpc);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001501
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001502 /* unlock all the fs_lock[] in do_checkpoint() */
Chao Yuc34f42e2015-12-23 17:50:30 +08001503 err = do_checkpoint(sbi, cpc);
Jaegeuk Kim4e6a8d92016-12-29 14:07:53 -08001504 if (err)
Chao Yu4d57b862018-05-30 00:20:41 +08001505 f2fs_release_discard_addrs(sbi);
Jaegeuk Kim4e6a8d92016-12-29 14:07:53 -08001506 else
Chao Yu4d57b862018-05-30 00:20:41 +08001507 f2fs_clear_prefree_segments(sbi, cpc);
Jaegeuk Kimedc55aa2018-09-17 17:36:06 -07001508stop:
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001509 unblock_operations(sbi);
Changman Lee942e0be2014-02-13 15:12:29 +09001510 stat_inc_cp_count(sbi->stat_info);
Jaegeuk Kim10027552015-04-09 17:03:53 -07001511
Chao Yuc473f1a2017-04-27 20:40:39 +08001512 if (cpc->reason & CP_RECOVERY)
Jaegeuk Kim10027552015-04-09 17:03:53 -07001513 f2fs_msg(sbi->sb, KERN_NOTICE,
1514 "checkpoint: version = %llx", ckpt_ver);
Jaegeuk Kim60b99b42015-10-05 14:49:57 -07001515
1516 /* do checkpoint periodically */
Jaegeuk Kim6beceb52016-01-08 15:51:50 -08001517 f2fs_update_time(sbi, CP_TIME);
Jaegeuk Kim55d1cdb2015-12-15 16:07:14 -08001518 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
Jaegeuk Kim85010172014-08-11 18:37:46 -07001519out:
1520 mutex_unlock(&sbi->cp_mutex);
Chao Yuc34f42e2015-12-23 17:50:30 +08001521 return err;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001522}
1523
Chao Yu4d57b862018-05-30 00:20:41 +08001524void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001525{
Jaegeuk Kim6451e042014-07-25 15:47:17 -07001526 int i;
1527
1528 for (i = 0; i < MAX_INO_ENTRY; i++) {
Chao Yu67298802014-11-18 11:18:36 +08001529 struct inode_management *im = &sbi->im[i];
1530
1531 INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
1532 spin_lock_init(&im->ino_lock);
1533 INIT_LIST_HEAD(&im->ino_list);
1534 im->ino_num = 0;
Jaegeuk Kim6451e042014-07-25 15:47:17 -07001535 }
1536
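	/*
	 * Orphan blocks get whatever is left in one segment after the cp
	 * pages, the cp payload and the current segment summaries; each
	 * block records F2FS_ORPHANS_PER_BLOCK orphan inode numbers.
	 */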
Chao Yub5b82202014-08-22 16:17:38 +08001537 sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
Wanpeng Li14b42812015-02-27 17:38:13 +08001538 NR_CURSEG_TYPE - __cp_payload(sbi)) *
1539 F2FS_ORPHANS_PER_BLOCK;
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001540}
1541
Chao Yu4d57b862018-05-30 00:20:41 +08001542int __init f2fs_create_checkpoint_caches(void)
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001543{
Jaegeuk Kim6451e042014-07-25 15:47:17 -07001544 ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
1545 sizeof(struct ino_entry));
1546 if (!ino_entry_slab)
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001547 return -ENOMEM;
Chao Yu4d57b862018-05-30 00:20:41 +08001548 f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
Chao Yu06292072014-12-29 15:56:18 +08001549 sizeof(struct inode_entry));
Chao Yu4d57b862018-05-30 00:20:41 +08001550 if (!f2fs_inode_entry_slab) {
Jaegeuk Kim6451e042014-07-25 15:47:17 -07001551 kmem_cache_destroy(ino_entry_slab);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001552 return -ENOMEM;
1553 }
1554 return 0;
1555}
1556
Chao Yu4d57b862018-05-30 00:20:41 +08001557void f2fs_destroy_checkpoint_caches(void)
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001558{
Jaegeuk Kim6451e042014-07-25 15:47:17 -07001559 kmem_cache_destroy(ino_entry_slab);
Chao Yu4d57b862018-05-30 00:20:41 +08001560 kmem_cache_destroy(f2fs_inode_entry_slab);
Jaegeuk Kim127e6702012-11-02 17:08:18 +09001561}