/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>

#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>
static int ext4_split_extent(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     struct ext4_map_blocks *map,
			     int split_flag,
			     int flags);
56 static int ext4_ext_truncate_extend_restart(handle_t *handle,
62 if (!ext4_handle_valid(handle))
64 if (handle->h_buffer_credits > needed)
66 err = ext4_journal_extend(handle, needed);
69 err = ext4_truncate_restart_trans(handle, inode, needed);
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
113 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
114 struct ext4_ext_path *path,
117 struct ext4_inode_info *ei = EXT4_I(inode);
118 ext4_fsblk_t bg_start;
119 ext4_fsblk_t last_block;
120 ext4_grpblk_t colour;
121 ext4_group_t block_group;
122 int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
126 struct ext4_extent *ex;
127 depth = path->p_depth;
	/*
	 * Try to predict block placement assuming that we are
	 * filling in a file which will eventually be
	 * non-sparse --- i.e., in the case of libbfd writing
	 * an ELF object sections out-of-order but in a way that
	 * eventually results in a contiguous object or
	 * executable file, or some database extending a table
	 * space file.  However, this is actually somewhat
	 * non-ideal if we are writing a sparse file such as
	 * qemu or KVM writing a raw image file that is going
	 * to stay fairly sparse, since it will end up
	 * fragmenting the file system's free space.  Maybe we
	 * should have some heuristics or some way to allow
	 * userspace to pass a hint to the file system,
	 * especially if the latter case turns out to be
	 * common.
	 */
146 ex = path[depth].p_ext;
148 ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
149 ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
151 if (block > ext_block)
152 return ext_pblk + (block - ext_block);
154 return ext_pblk - (ext_block - block);
157 /* it looks like index is empty;
158 * try to find starting block from index itself */
159 if (path[depth].p_bh)
160 return path[depth].p_bh->b_blocknr;
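	/*
	 * Worked example (editor's illustration): if the path ends at an
	 * extent [ee_block 100, len 8, pblk 5000] and we want logical
	 * block 110, the goal computed above is 5000 + (110 - 100) = 5010,
	 * i.e. the same distance past the extent's physical start.
	 */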
163 /* OK. use inode's group */
164 block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
178 bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
179 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;
188 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
189 colour = (current->pid % 16) *
190 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
192 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
193 return bg_start + colour + block;
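	/*
	 * Worked example (editor's note, assuming 4 KiB blocks so that
	 * EXT4_BLOCKS_PER_GROUP = 32768): with flex_size = 16 and inode
	 * group 37, block_group &= ~15 yields 32, and a regular file is
	 * bumped to group 33.  With pid 12345, pid % 16 = 9, so colour =
	 * 9 * (32768 / 16) = 18432 blocks into the group, spreading
	 * concurrent allocators apart.
	 */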
 * Allocation for a metadata block.
200 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
201 struct ext4_ext_path *path,
202 struct ext4_extent *ex, int *err, unsigned int flags)
204 ext4_fsblk_t goal, newblock;
206 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
207 newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
212 static inline int ext4_ext_space_block(struct inode *inode, int check)
216 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
217 / sizeof(struct ext4_extent);
219 #ifdef AGGRESSIVE_TEST
227 static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
231 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
232 / sizeof(struct ext4_extent_idx);
234 #ifdef AGGRESSIVE_TEST
242 static inline int ext4_ext_space_root(struct inode *inode, int check)
246 size = sizeof(EXT4_I(inode)->i_data);
247 size -= sizeof(struct ext4_extent_header);
248 size /= sizeof(struct ext4_extent);
250 #ifdef AGGRESSIVE_TEST
258 static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
262 size = sizeof(EXT4_I(inode)->i_data);
263 size -= sizeof(struct ext4_extent_header);
264 size /= sizeof(struct ext4_extent_idx);
266 #ifdef AGGRESSIVE_TEST
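/*
 * Worked sizes (editor's note): struct ext4_extent_header, struct
 * ext4_extent and struct ext4_extent_idx are 12 bytes each, and the
 * in-inode i_data area is 60 bytes.  So with a 4 KiB block size a full
 * index/leaf block holds (4096 - 12) / 12 = 340 entries, while the
 * in-inode root holds (60 - 12) / 12 = 4 entries.
 */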
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks.
 * Worst case is one block per extent.
 */
279 int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
281 struct ext4_inode_info *ei = EXT4_I(inode);
284 idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
285 / sizeof(struct ext4_extent_idx));
	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
295 if (ei->i_da_metadata_calc_len &&
296 ei->i_da_metadata_calc_last_lblock+1 == lblock) {
297 if ((ei->i_da_metadata_calc_len % idxs) == 0)
299 if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
301 if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
303 ei->i_da_metadata_calc_len = 0;
305 ei->i_da_metadata_calc_len++;
306 ei->i_da_metadata_calc_last_lblock++;
311 * In the worst case we need a new set of index blocks at
312 * every level of the inode's extent tree.
314 ei->i_da_metadata_calc_len = 1;
315 ei->i_da_metadata_calc_last_lblock = lblock;
316 return ext_depth(inode) + 1;
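/*
 * Worked example (editor's note, 4 KiB blocks): idxs = (4096 - 12) / 12
 * = 340, so a contiguous delayed-allocation run needs a new index block
 * every 340 leaf blocks, an additional second-level index block every
 * 340^2 = 115600 leaf blocks, and yet another level every 340^3 blocks.
 */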
320 ext4_ext_max_entries(struct inode *inode, int depth)
324 if (depth == ext_depth(inode)) {
326 max = ext4_ext_space_root(inode, 1);
328 max = ext4_ext_space_root_idx(inode, 1);
331 max = ext4_ext_space_block(inode, 1);
333 max = ext4_ext_space_block_idx(inode, 1);
339 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
341 ext4_fsblk_t block = ext4_ext_pblock(ext);
342 int len = ext4_ext_get_actual_len(ext);
344 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
347 static int ext4_valid_extent_idx(struct inode *inode,
348 struct ext4_extent_idx *ext_idx)
350 ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
352 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
355 static int ext4_valid_extent_entries(struct inode *inode,
356 struct ext4_extent_header *eh,
359 struct ext4_extent *ext;
360 struct ext4_extent_idx *ext_idx;
361 unsigned short entries;
362 if (eh->eh_entries == 0)
365 entries = le16_to_cpu(eh->eh_entries);
369 ext = EXT_FIRST_EXTENT(eh);
371 if (!ext4_valid_extent(inode, ext))
377 ext_idx = EXT_FIRST_INDEX(eh);
379 if (!ext4_valid_extent_idx(inode, ext_idx))
388 static int __ext4_ext_check(const char *function, unsigned int line,
389 struct inode *inode, struct ext4_extent_header *eh,
392 const char *error_msg;
395 if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
396 error_msg = "invalid magic";
399 if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
400 error_msg = "unexpected eh_depth";
403 if (unlikely(eh->eh_max == 0)) {
404 error_msg = "invalid eh_max";
407 max = ext4_ext_max_entries(inode, depth);
408 if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
409 error_msg = "too large eh_max";
412 if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
413 error_msg = "invalid eh_entries";
416 if (!ext4_valid_extent_entries(inode, eh, depth)) {
417 error_msg = "invalid extent entries";
423 ext4_error_inode(inode, function, line, 0,
424 "bad header/extent: %s - magic %x, "
425 "entries %u, max %u(%u), depth %u(%u)",
426 error_msg, le16_to_cpu(eh->eh_magic),
427 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
428 max, le16_to_cpu(eh->eh_depth), depth);
433 #define ext4_ext_check(inode, eh, depth) \
434 __ext4_ext_check(__func__, __LINE__, inode, eh, depth)
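/*
 * Editor's sketch (illustrative only, not part of the original file):
 * how a hypothetical caller might validate a freshly read extent block
 * with the macro above before trusting its contents.  The depth passed
 * in is the expected depth of that block within the tree.
 */
#if 0	/* illustrative sketch */
static int example_check_block(struct inode *inode, struct buffer_head *bh,
			       int expected_depth)
{
	struct ext4_extent_header *eh = ext_block_hdr(bh);

	/* returns 0 on success, -EIO after reporting a corrupt header */
	return ext4_ext_check(inode, eh, expected_depth);
}
#endif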
436 int ext4_ext_check_inode(struct inode *inode)
438 return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
442 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
444 int k, l = path->p_depth;
447 for (k = 0; k <= l; k++, path++) {
449 ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
450 ext4_idx_pblock(path->p_idx));
451 } else if (path->p_ext) {
452 ext_debug(" %d:[%d]%d:%llu ",
453 le32_to_cpu(path->p_ext->ee_block),
454 ext4_ext_is_uninitialized(path->p_ext),
455 ext4_ext_get_actual_len(path->p_ext),
456 ext4_ext_pblock(path->p_ext));
463 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
465 int depth = ext_depth(inode);
466 struct ext4_extent_header *eh;
467 struct ext4_extent *ex;
473 eh = path[depth].p_hdr;
474 ex = EXT_FIRST_EXTENT(eh);
476 ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
478 for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
479 ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
480 ext4_ext_is_uninitialized(ex),
481 ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
486 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
487 ext4_fsblk_t newblock, int level)
489 int depth = ext_depth(inode);
490 struct ext4_extent *ex;
492 if (depth != level) {
493 struct ext4_extent_idx *idx;
494 idx = path[level].p_idx;
495 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
496 ext_debug("%d: move %d:%llu in new index %llu\n", level,
497 le32_to_cpu(idx->ei_block),
498 ext4_idx_pblock(idx),
506 ex = path[depth].p_ext;
507 while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
508 ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
509 le32_to_cpu(ex->ee_block),
511 ext4_ext_is_uninitialized(ex),
512 ext4_ext_get_actual_len(ex),
519 #define ext4_ext_show_path(inode, path)
520 #define ext4_ext_show_leaf(inode, path)
521 #define ext4_ext_show_move(inode, path, newblock, level)
524 void ext4_ext_drop_refs(struct ext4_ext_path *path)
526 int depth = path->p_depth;
529 for (i = 0; i <= depth; i++, path++)
537 * ext4_ext_binsearch_idx:
538 * binary search for the closest index of the given block
539 * the header must be checked before calling this
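 *
 * Worked example (editor's illustration): for index entries whose
 * ei_block values are {0, 300, 800}, a lookup of block 500 converges on
 * the entry starting at 300, i.e. the rightmost index with ei_block <=
 * the target block.
 */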
542 ext4_ext_binsearch_idx(struct inode *inode,
543 struct ext4_ext_path *path, ext4_lblk_t block)
545 struct ext4_extent_header *eh = path->p_hdr;
546 struct ext4_extent_idx *r, *l, *m;
549 ext_debug("binsearch for %u(idx): ", block);
551 l = EXT_FIRST_INDEX(eh) + 1;
552 r = EXT_LAST_INDEX(eh);
555 if (block < le32_to_cpu(m->ei_block))
559 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
560 m, le32_to_cpu(m->ei_block),
561 r, le32_to_cpu(r->ei_block));
565 ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
566 ext4_idx_pblock(path->p_idx));
568 #ifdef CHECK_BINSEARCH
570 struct ext4_extent_idx *chix, *ix;
573 chix = ix = EXT_FIRST_INDEX(eh);
574 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
576 le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
577 printk(KERN_DEBUG "k=%d, ix=0x%p, "
579 ix, EXT_FIRST_INDEX(eh));
580 printk(KERN_DEBUG "%u <= %u\n",
581 le32_to_cpu(ix->ei_block),
582 le32_to_cpu(ix[-1].ei_block));
584 BUG_ON(k && le32_to_cpu(ix->ei_block)
585 <= le32_to_cpu(ix[-1].ei_block));
586 if (block < le32_to_cpu(ix->ei_block))
590 BUG_ON(chix != path->p_idx);
597 * ext4_ext_binsearch:
598 * binary search for closest extent of the given block
599 * the header must be checked before calling this
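 *
 * Worked example (editor's illustration): for leaf extents starting at
 * blocks {0, 8, 64}, a lookup of block 10 settles on the extent that
 * starts at 8; whether block 10 actually lies inside it then depends on
 * that extent's length.
 */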
602 ext4_ext_binsearch(struct inode *inode,
603 struct ext4_ext_path *path, ext4_lblk_t block)
605 struct ext4_extent_header *eh = path->p_hdr;
606 struct ext4_extent *r, *l, *m;
608 if (eh->eh_entries == 0) {
610 * this leaf is empty:
611 * we get such a leaf in split/add case
616 ext_debug("binsearch for %u: ", block);
618 l = EXT_FIRST_EXTENT(eh) + 1;
619 r = EXT_LAST_EXTENT(eh);
623 if (block < le32_to_cpu(m->ee_block))
627 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
628 m, le32_to_cpu(m->ee_block),
629 r, le32_to_cpu(r->ee_block));
633 ext_debug(" -> %d:%llu:[%d]%d ",
634 le32_to_cpu(path->p_ext->ee_block),
635 ext4_ext_pblock(path->p_ext),
636 ext4_ext_is_uninitialized(path->p_ext),
637 ext4_ext_get_actual_len(path->p_ext));
639 #ifdef CHECK_BINSEARCH
641 struct ext4_extent *chex, *ex;
644 chex = ex = EXT_FIRST_EXTENT(eh);
645 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
646 BUG_ON(k && le32_to_cpu(ex->ee_block)
647 <= le32_to_cpu(ex[-1].ee_block));
648 if (block < le32_to_cpu(ex->ee_block))
652 BUG_ON(chex != path->p_ext);
658 int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
660 struct ext4_extent_header *eh;
662 eh = ext_inode_hdr(inode);
665 eh->eh_magic = EXT4_EXT_MAGIC;
666 eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
667 ext4_mark_inode_dirty(handle, inode);
668 ext4_ext_invalidate_cache(inode);
672 struct ext4_ext_path *
673 ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
674 struct ext4_ext_path *path)
676 struct ext4_extent_header *eh;
677 struct buffer_head *bh;
678 short int depth, i, ppos = 0, alloc = 0;
680 eh = ext_inode_hdr(inode);
681 depth = ext_depth(inode);
683 /* account possible depth increase */
685 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
688 return ERR_PTR(-ENOMEM);
695 /* walk through the tree */
697 int need_to_validate = 0;
699 ext_debug("depth %d: num %d, max %d\n",
700 ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
702 ext4_ext_binsearch_idx(inode, path + ppos, block);
703 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
704 path[ppos].p_depth = i;
705 path[ppos].p_ext = NULL;
707 bh = sb_getblk(inode->i_sb, path[ppos].p_block);
710 if (!bh_uptodate_or_lock(bh)) {
711 trace_ext4_ext_load_extent(inode, block,
713 if (bh_submit_read(bh) < 0) {
717 /* validate the extent entries */
718 need_to_validate = 1;
720 eh = ext_block_hdr(bh);
722 if (unlikely(ppos > depth)) {
724 EXT4_ERROR_INODE(inode,
725 "ppos %d > depth %d", ppos, depth);
728 path[ppos].p_bh = bh;
729 path[ppos].p_hdr = eh;
732 if (need_to_validate && ext4_ext_check(inode, eh, i))
736 path[ppos].p_depth = i;
737 path[ppos].p_ext = NULL;
738 path[ppos].p_idx = NULL;
741 ext4_ext_binsearch(inode, path + ppos, block);
742 /* if not an empty leaf */
743 if (path[ppos].p_ext)
744 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
746 ext4_ext_show_path(inode, path);
751 ext4_ext_drop_refs(path);
754 return ERR_PTR(-EIO);
758 * ext4_ext_insert_index:
759 * insert new index [@logical;@ptr] into the block at @curp;
760 * check where to insert: before @curp or after @curp
762 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
763 struct ext4_ext_path *curp,
764 int logical, ext4_fsblk_t ptr)
766 struct ext4_extent_idx *ix;
769 err = ext4_ext_get_access(handle, inode, curp);
773 if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
774 EXT4_ERROR_INODE(inode,
775 "logical %d == ei_block %d!",
776 logical, le32_to_cpu(curp->p_idx->ei_block));
779 len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
780 if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
782 if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
783 len = (len - 1) * sizeof(struct ext4_extent_idx);
784 len = len < 0 ? 0 : len;
785 ext_debug("insert new index %d after: %llu. "
786 "move %d from 0x%p to 0x%p\n",
788 (curp->p_idx + 1), (curp->p_idx + 2));
789 memmove(curp->p_idx + 2, curp->p_idx + 1, len);
791 ix = curp->p_idx + 1;
794 len = len * sizeof(struct ext4_extent_idx);
795 len = len < 0 ? 0 : len;
796 ext_debug("insert new index %d before: %llu. "
797 "move %d from 0x%p to 0x%p\n",
799 curp->p_idx, (curp->p_idx + 1));
800 memmove(curp->p_idx + 1, curp->p_idx, len);
804 ix->ei_block = cpu_to_le32(logical);
805 ext4_idx_store_pblock(ix, ptr);
806 le16_add_cpu(&curp->p_hdr->eh_entries, 1);
	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
815 if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
816 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
820 err = ext4_ext_dirty(handle, inode, curp);
821 ext4_std_error(inode->i_sb, err);
828 * inserts new subtree into the path, using free index entry
830 * - allocates all needed blocks (new leaf and all intermediate index blocks)
831 * - makes decision where to split
832 * - moves remaining extents and index entries (right to the split point)
833 * into the newly allocated blocks
834 * - initializes subtree
836 static int ext4_ext_split(handle_t *handle, struct inode *inode,
838 struct ext4_ext_path *path,
839 struct ext4_extent *newext, int at)
841 struct buffer_head *bh = NULL;
842 int depth = ext_depth(inode);
843 struct ext4_extent_header *neh;
844 struct ext4_extent_idx *fidx;
846 ext4_fsblk_t newblock, oldblock;
848 ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
851 /* make decision: where to split? */
852 /* FIXME: now decision is simplest: at current extent */
854 /* if current leaf will be split, then we should use
855 * border from split point */
856 if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
857 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
860 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
861 border = path[depth].p_ext[1].ee_block;
862 ext_debug("leaf will be split."
863 " next leaf starts at %d\n",
864 le32_to_cpu(border));
866 border = newext->ee_block;
867 ext_debug("leaf will be added."
868 " next leaf starts at %d\n",
869 le32_to_cpu(border));
	/*
	 * If an error occurs, then we break processing
	 * and mark the filesystem read-only.  The index won't
	 * be inserted and the tree will be in a consistent
	 * state.  The next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;
888 /* allocate all needed blocks */
889 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
890 for (a = 0; a < depth - at; a++) {
891 newblock = ext4_ext_new_meta_block(handle, inode, path,
892 newext, &err, flags);
895 ablocks[a] = newblock;
898 /* initialize new leaf */
899 newblock = ablocks[--a];
900 if (unlikely(newblock == 0)) {
901 EXT4_ERROR_INODE(inode, "newblock == 0!");
905 bh = sb_getblk(inode->i_sb, newblock);
912 err = ext4_journal_get_create_access(handle, bh);
916 neh = ext_block_hdr(bh);
918 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
919 neh->eh_magic = EXT4_EXT_MAGIC;
922 /* move remainder of path[depth] to the new leaf */
923 if (unlikely(path[depth].p_hdr->eh_entries !=
924 path[depth].p_hdr->eh_max)) {
925 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
926 path[depth].p_hdr->eh_entries,
927 path[depth].p_hdr->eh_max);
931 /* start copy from next extent */
932 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
933 ext4_ext_show_move(inode, path, newblock, depth);
935 struct ext4_extent *ex;
936 ex = EXT_FIRST_EXTENT(neh);
937 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
938 le16_add_cpu(&neh->eh_entries, m);
941 set_buffer_uptodate(bh);
944 err = ext4_handle_dirty_metadata(handle, inode, bh);
950 /* correct old leaf */
952 err = ext4_ext_get_access(handle, inode, path + depth);
955 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
956 err = ext4_ext_dirty(handle, inode, path + depth);
962 /* create intermediate indexes */
964 if (unlikely(k < 0)) {
965 EXT4_ERROR_INODE(inode, "k %d < 0!", k);
970 ext_debug("create %d intermediate indices\n", k);
971 /* insert new index into current index block */
972 /* current depth stored in i var */
976 newblock = ablocks[--a];
977 bh = sb_getblk(inode->i_sb, newblock);
984 err = ext4_journal_get_create_access(handle, bh);
988 neh = ext_block_hdr(bh);
989 neh->eh_entries = cpu_to_le16(1);
990 neh->eh_magic = EXT4_EXT_MAGIC;
991 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
992 neh->eh_depth = cpu_to_le16(depth - i);
993 fidx = EXT_FIRST_INDEX(neh);
994 fidx->ei_block = border;
995 ext4_idx_store_pblock(fidx, oldblock);
997 ext_debug("int.index at %d (block %llu): %u -> %llu\n",
998 i, newblock, le32_to_cpu(border), oldblock);
1000 /* move remainder of path[i] to the new index block */
1001 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1002 EXT_LAST_INDEX(path[i].p_hdr))) {
1003 EXT4_ERROR_INODE(inode,
1004 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1005 le32_to_cpu(path[i].p_ext->ee_block));
1009 /* start copy indexes */
1010 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1011 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
1012 EXT_MAX_INDEX(path[i].p_hdr));
1013 ext4_ext_show_move(inode, path, newblock, i);
1015 memmove(++fidx, path[i].p_idx,
1016 sizeof(struct ext4_extent_idx) * m);
1017 le16_add_cpu(&neh->eh_entries, m);
1019 set_buffer_uptodate(bh);
1022 err = ext4_handle_dirty_metadata(handle, inode, bh);
1028 /* correct old index */
1030 err = ext4_ext_get_access(handle, inode, path + i);
1033 le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1034 err = ext4_ext_dirty(handle, inode, path + i);
1042 /* insert new index */
1043 err = ext4_ext_insert_index(handle, inode, path + at,
1044 le32_to_cpu(border), newblock);
1048 if (buffer_locked(bh))
1054 /* free all allocated blocks in error case */
1055 for (i = 0; i < depth; i++) {
1058 ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1059 EXT4_FREE_BLOCKS_METADATA);
1068 * ext4_ext_grow_indepth:
1069 * implements tree growing procedure:
1070 * - allocates new block
1071 * - moves top-level data (index block or leaf) into the new block
1072 * - initializes new top-level, creating index that points to the
1073 * just created block
1075 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1077 struct ext4_ext_path *path,
1078 struct ext4_extent *newext)
1080 struct ext4_ext_path *curp = path;
1081 struct ext4_extent_header *neh;
1082 struct buffer_head *bh;
1083 ext4_fsblk_t newblock;
1086 newblock = ext4_ext_new_meta_block(handle, inode, path,
1087 newext, &err, flags);
1091 bh = sb_getblk(inode->i_sb, newblock);
1094 ext4_std_error(inode->i_sb, err);
1099 err = ext4_journal_get_create_access(handle, bh);
1105 /* move top-level index/leaf into new block */
1106 memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));
1108 /* set size of new block */
1109 neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
1112 if (ext_depth(inode))
1113 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1115 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1116 neh->eh_magic = EXT4_EXT_MAGIC;
1117 set_buffer_uptodate(bh);
1120 err = ext4_handle_dirty_metadata(handle, inode, bh);
1124 /* create index in new top-level index: num,max,pointer */
1125 err = ext4_ext_get_access(handle, inode, curp);
1129 curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
1130 curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1131 curp->p_hdr->eh_entries = cpu_to_le16(1);
1132 curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
1134 if (path[0].p_hdr->eh_depth)
1135 curp->p_idx->ei_block =
1136 EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
1138 curp->p_idx->ei_block =
1139 EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
1140 ext4_idx_store_pblock(curp->p_idx, newblock);
1142 neh = ext_inode_hdr(inode);
1143 ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
1144 le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
1145 le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1146 ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1148 neh->eh_depth = cpu_to_le16(path->p_depth + 1);
1149 err = ext4_ext_dirty(handle, inode, curp);
 * ext4_ext_create_new_leaf:
 * finds an empty index and adds a new leaf.
 * if no free index is found, then it requests growing the tree in depth.
 */
1161 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1163 struct ext4_ext_path *path,
1164 struct ext4_extent *newext)
1166 struct ext4_ext_path *curp;
1167 int depth, i, err = 0;
1170 i = depth = ext_depth(inode);
	/* walk up the tree looking for a free index entry */
1173 curp = path + depth;
1174 while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1179 /* we use already allocated block for index block,
1180 * so subsequent data blocks should be contiguous */
1181 if (EXT_HAS_FREE_INDEX(curp)) {
1182 /* if we found index with free entry, then use that
1183 * entry: create all needed subtree and add new leaf */
1184 err = ext4_ext_split(handle, inode, flags, path, newext, i);
1189 ext4_ext_drop_refs(path);
1190 path = ext4_ext_find_extent(inode,
1191 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1194 err = PTR_ERR(path);
1196 /* tree is full, time to grow in depth */
1197 err = ext4_ext_grow_indepth(handle, inode, flags,
1203 ext4_ext_drop_refs(path);
1204 path = ext4_ext_find_extent(inode,
1205 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1208 err = PTR_ERR(path);
		/*
		 * only the first grow (depth 0 -> 1) produces free space
		 * in the new root; in all other cases we have to split
		 * the grown tree
		 */
1216 depth = ext_depth(inode);
1217 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1218 /* now we need to split */
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
1234 static int ext4_ext_search_left(struct inode *inode,
1235 struct ext4_ext_path *path,
1236 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1238 struct ext4_extent_idx *ix;
1239 struct ext4_extent *ex;
1242 if (unlikely(path == NULL)) {
1243 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1246 depth = path->p_depth;
1249 if (depth == 0 && path->p_ext == NULL)
	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */
1256 ex = path[depth].p_ext;
1257 ee_len = ext4_ext_get_actual_len(ex);
1258 if (*logical < le32_to_cpu(ex->ee_block)) {
1259 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1260 EXT4_ERROR_INODE(inode,
1261 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1262 *logical, le32_to_cpu(ex->ee_block));
1265 while (--depth >= 0) {
1266 ix = path[depth].p_idx;
1267 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1268 EXT4_ERROR_INODE(inode,
1269 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1270 ix != NULL ? ix->ei_block : 0,
1271 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1272 EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
1280 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1281 EXT4_ERROR_INODE(inode,
1282 "logical %d < ee_block %d + ee_len %d!",
1283 *logical, le32_to_cpu(ex->ee_block), ee_len);
1287 *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1288 *phys = ext4_ext_pblock(ex) + ee_len - 1;
/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
1299 static int ext4_ext_search_right(struct inode *inode,
1300 struct ext4_ext_path *path,
1301 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1303 struct buffer_head *bh = NULL;
1304 struct ext4_extent_header *eh;
1305 struct ext4_extent_idx *ix;
1306 struct ext4_extent *ex;
1308 int depth; /* Note, NOT eh_depth; depth from top of tree */
1311 if (unlikely(path == NULL)) {
1312 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1315 depth = path->p_depth;
1318 if (depth == 0 && path->p_ext == NULL)
	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */
1325 ex = path[depth].p_ext;
1326 ee_len = ext4_ext_get_actual_len(ex);
1327 if (*logical < le32_to_cpu(ex->ee_block)) {
1328 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1329 EXT4_ERROR_INODE(inode,
1330 "first_extent(path[%d].p_hdr) != ex",
1334 while (--depth >= 0) {
1335 ix = path[depth].p_idx;
1336 if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1337 EXT4_ERROR_INODE(inode,
1338 "ix != EXT_FIRST_INDEX *logical %d!",
1343 *logical = le32_to_cpu(ex->ee_block);
1344 *phys = ext4_ext_pblock(ex);
1348 if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1349 EXT4_ERROR_INODE(inode,
1350 "logical %d < ee_block %d + ee_len %d!",
1351 *logical, le32_to_cpu(ex->ee_block), ee_len);
1355 if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1356 /* next allocated block in this leaf */
1358 *logical = le32_to_cpu(ex->ee_block);
1359 *phys = ext4_ext_pblock(ex);
1363 /* go up and search for index to the right */
1364 while (--depth >= 0) {
1365 ix = path[depth].p_idx;
1366 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1370 /* we've gone up to the root and found no index to the right */
1374 /* we've found index to the right, let's
1375 * follow it and find the closest allocated
1376 * block to the right */
1378 block = ext4_idx_pblock(ix);
1379 while (++depth < path->p_depth) {
1380 bh = sb_bread(inode->i_sb, block);
1383 eh = ext_block_hdr(bh);
1384 /* subtract from p_depth to get proper eh_depth */
1385 if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
1389 ix = EXT_FIRST_INDEX(eh);
1390 block = ext4_idx_pblock(ix);
1394 bh = sb_bread(inode->i_sb, block);
1397 eh = ext_block_hdr(bh);
1398 if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
1402 ex = EXT_FIRST_EXTENT(eh);
1403 *logical = le32_to_cpu(ex->ee_block);
1404 *phys = ext4_ext_pblock(ex);
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
1417 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1421 BUG_ON(path == NULL);
1422 depth = path->p_depth;
1424 if (depth == 0 && path->p_ext == NULL)
1425 return EXT_MAX_BLOCKS;
1427 while (depth >= 0) {
1428 if (depth == path->p_depth) {
1430 if (path[depth].p_ext !=
1431 EXT_LAST_EXTENT(path[depth].p_hdr))
1432 return le32_to_cpu(path[depth].p_ext[1].ee_block);
1435 if (path[depth].p_idx !=
1436 EXT_LAST_INDEX(path[depth].p_hdr))
1437 return le32_to_cpu(path[depth].p_idx[1].ei_block);
1442 return EXT_MAX_BLOCKS;
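	/*
	 * Worked example (editor's illustration): with leaf extents
	 * starting at {0, 8, 64} and the path positioned on the extent at
	 * 8, the next allocated block reported is 64; if that extent were
	 * the last one in its leaf, the walk above would consult the
	 * parent index instead.
	 */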
1446 * ext4_ext_next_leaf_block:
1447 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1449 static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1450 struct ext4_ext_path *path)
1454 BUG_ON(path == NULL);
1455 depth = path->p_depth;
	/* a zero-depth tree has no leaf blocks at all */
1459 return EXT_MAX_BLOCKS;
1461 /* go to index block */
1464 while (depth >= 0) {
1465 if (path[depth].p_idx !=
1466 EXT_LAST_INDEX(path[depth].p_hdr))
1467 return (ext4_lblk_t)
1468 le32_to_cpu(path[depth].p_idx[1].ei_block);
1472 return EXT_MAX_BLOCKS;
1476 * ext4_ext_correct_indexes:
1477 * if leaf gets modified and modified extent is first in the leaf,
1478 * then we have to correct all indexes above.
1479 * TODO: do we need to correct tree in all cases?
1481 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1482 struct ext4_ext_path *path)
1484 struct ext4_extent_header *eh;
1485 int depth = ext_depth(inode);
1486 struct ext4_extent *ex;
1490 eh = path[depth].p_hdr;
1491 ex = path[depth].p_ext;
1493 if (unlikely(ex == NULL || eh == NULL)) {
1494 EXT4_ERROR_INODE(inode,
1495 "ex %p == NULL or eh %p == NULL", ex, eh);
1500 /* there is no tree at all */
1504 if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we only correct the tree if the first leaf was modified */
1510 * TODO: we need correction if border is smaller than current one
1513 border = path[depth].p_ext->ee_block;
1514 err = ext4_ext_get_access(handle, inode, path + k);
1517 path[k].p_idx->ei_block = border;
1518 err = ext4_ext_dirty(handle, inode, path + k);
1523 /* change all left-side indexes */
1524 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1526 err = ext4_ext_get_access(handle, inode, path + k);
1529 path[k].p_idx->ei_block = border;
1530 err = ext4_ext_dirty(handle, inode, path + k);
1539 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1540 struct ext4_extent *ex2)
1542 unsigned short ext1_ee_len, ext2_ee_len, max_len;
1545 * Make sure that either both extents are uninitialized, or
1548 if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
1551 if (ext4_ext_is_uninitialized(ex1))
1552 max_len = EXT_UNINIT_MAX_LEN;
1554 max_len = EXT_INIT_MAX_LEN;
1556 ext1_ee_len = ext4_ext_get_actual_len(ex1);
1557 ext2_ee_len = ext4_ext_get_actual_len(ex2);
1559 if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1560 le32_to_cpu(ex2->ee_block))
	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
1568 if (ext1_ee_len + ext2_ee_len > max_len)
1570 #ifdef AGGRESSIVE_TEST
1571 if (ext1_ee_len >= 4)
1575 if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1581 * This function tries to merge the "ex" extent to the next extent in the tree.
1582 * It always tries to merge towards right. If you want to merge towards
1583 * left, pass "ex - 1" as argument instead of "ex".
1584 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1585 * 1 if they got merged.
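 *
 * Worked example (editor's illustration): ex = [lblk 100, len 8,
 * pblk 5000] and ex + 1 = [lblk 108, len 4, pblk 5008] are logically
 * and physically contiguous and share the same init state, so they
 * merge into [lblk 100, len 12, pblk 5000] and eh_entries drops by one.
 */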
1587 static int ext4_ext_try_to_merge_right(struct inode *inode,
1588 struct ext4_ext_path *path,
1589 struct ext4_extent *ex)
1591 struct ext4_extent_header *eh;
1592 unsigned int depth, len;
1594 int uninitialized = 0;
1596 depth = ext_depth(inode);
1597 BUG_ON(path[depth].p_hdr == NULL);
1598 eh = path[depth].p_hdr;
1600 while (ex < EXT_LAST_EXTENT(eh)) {
1601 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1603 /* merge with next extent! */
1604 if (ext4_ext_is_uninitialized(ex))
1606 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1607 + ext4_ext_get_actual_len(ex + 1));
1609 ext4_ext_mark_uninitialized(ex);
1611 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1612 len = (EXT_LAST_EXTENT(eh) - ex - 1)
1613 * sizeof(struct ext4_extent);
1614 memmove(ex + 1, ex + 2, len);
1616 le16_add_cpu(&eh->eh_entries, -1);
1618 WARN_ON(eh->eh_entries == 0);
1619 if (!eh->eh_entries)
1620 EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
/*
 * This function tries to merge the @ex extent to neighbours in the tree.
 * Returns 1 if the extents were merged towards the left, else 0.
 */
1630 static int ext4_ext_try_to_merge(struct inode *inode,
1631 struct ext4_ext_path *path,
1632 struct ext4_extent *ex) {
1633 struct ext4_extent_header *eh;
1638 depth = ext_depth(inode);
1639 BUG_ON(path[depth].p_hdr == NULL);
1640 eh = path[depth].p_hdr;
1642 if (ex > EXT_FIRST_EXTENT(eh))
1643 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1646 ret = ext4_ext_try_to_merge_right(inode, path, ex);
/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
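/*
 * Worked example (editor's illustration): newext = [lblk 100, len 50]
 * collides with an existing extent starting at lblk 120, so ee_len is
 * trimmed to 120 - 100 = 20 blocks and the function returns 1.
 */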
1659 static unsigned int ext4_ext_check_overlap(struct inode *inode,
1660 struct ext4_extent *newext,
1661 struct ext4_ext_path *path)
1664 unsigned int depth, len1;
1665 unsigned int ret = 0;
1667 b1 = le32_to_cpu(newext->ee_block);
1668 len1 = ext4_ext_get_actual_len(newext);
1669 depth = ext_depth(inode);
1670 if (!path[depth].p_ext)
1672 b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1675 * get the next allocated block if the extent in the path
1676 * is before the requested block(s)
1679 b2 = ext4_ext_next_allocated_block(path);
1680 if (b2 == EXT_MAX_BLOCKS)
1684 /* check for wrap through zero on extent logical start block*/
1685 if (b1 + len1 < b1) {
1686 len1 = EXT_MAX_BLOCKS - b1;
1687 newext->ee_len = cpu_to_le16(len1);
1691 /* check for overlap */
1692 if (b1 + len1 > b2) {
1693 newext->ee_len = cpu_to_le16(b2 - b1);
/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into the existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case.
 */
1706 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1707 struct ext4_ext_path *path,
1708 struct ext4_extent *newext, int flag)
1710 struct ext4_extent_header *eh;
1711 struct ext4_extent *ex, *fex;
1712 struct ext4_extent *nearex; /* nearest extent */
1713 struct ext4_ext_path *npath = NULL;
1714 int depth, len, err;
1716 unsigned uninitialized = 0;
1719 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1720 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1723 depth = ext_depth(inode);
1724 ex = path[depth].p_ext;
1725 if (unlikely(path[depth].p_hdr == NULL)) {
1726 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1730 /* try to insert block into found extent and return */
1731 if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
1732 && ext4_can_extents_be_merged(inode, ex, newext)) {
1733 ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
1734 ext4_ext_is_uninitialized(newext),
1735 ext4_ext_get_actual_len(newext),
1736 le32_to_cpu(ex->ee_block),
1737 ext4_ext_is_uninitialized(ex),
1738 ext4_ext_get_actual_len(ex),
1739 ext4_ext_pblock(ex));
1740 err = ext4_ext_get_access(handle, inode, path + depth);
1745 * ext4_can_extents_be_merged should have checked that either
1746 * both extents are uninitialized, or both aren't. Thus we
1747 * need to check only one of them here.
1749 if (ext4_ext_is_uninitialized(ex))
1751 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1752 + ext4_ext_get_actual_len(newext));
1754 ext4_ext_mark_uninitialized(ex);
1755 eh = path[depth].p_hdr;
1761 depth = ext_depth(inode);
1762 eh = path[depth].p_hdr;
1763 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1766 /* probably next leaf has space for us? */
1767 fex = EXT_LAST_EXTENT(eh);
1768 next = ext4_ext_next_leaf_block(inode, path);
1769 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
1770 && next != EXT_MAX_BLOCKS) {
1771 ext_debug("next leaf block - %d\n", next);
1772 BUG_ON(npath != NULL);
1773 npath = ext4_ext_find_extent(inode, next, NULL);
1775 return PTR_ERR(npath);
1776 BUG_ON(npath->p_depth != path->p_depth);
1777 eh = npath[depth].p_hdr;
1778 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1779 ext_debug("next leaf isn't full(%d)\n",
1780 le16_to_cpu(eh->eh_entries));
1784 ext_debug("next leaf has no free space(%d,%d)\n",
1785 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1789 * There is no free space in the found leaf.
1790 * We're gonna add a new leaf in the tree.
1792 if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
1793 flags = EXT4_MB_USE_ROOT_BLOCKS;
1794 err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1797 depth = ext_depth(inode);
1798 eh = path[depth].p_hdr;
1801 nearex = path[depth].p_ext;
1803 err = ext4_ext_get_access(handle, inode, path + depth);
1808 /* there is no extent in this leaf, create first one */
1809 ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
1810 le32_to_cpu(newext->ee_block),
1811 ext4_ext_pblock(newext),
1812 ext4_ext_is_uninitialized(newext),
1813 ext4_ext_get_actual_len(newext));
1814 path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1815 } else if (le32_to_cpu(newext->ee_block)
1816 > le32_to_cpu(nearex->ee_block)) {
1817 /* BUG_ON(newext->ee_block == nearex->ee_block); */
1818 if (nearex != EXT_LAST_EXTENT(eh)) {
1819 len = EXT_MAX_EXTENT(eh) - nearex;
1820 len = (len - 1) * sizeof(struct ext4_extent);
1821 len = len < 0 ? 0 : len;
1822 ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
1823 "move %d from 0x%p to 0x%p\n",
1824 le32_to_cpu(newext->ee_block),
1825 ext4_ext_pblock(newext),
1826 ext4_ext_is_uninitialized(newext),
1827 ext4_ext_get_actual_len(newext),
1828 nearex, len, nearex + 1, nearex + 2);
1829 memmove(nearex + 2, nearex + 1, len);
1831 path[depth].p_ext = nearex + 1;
1833 BUG_ON(newext->ee_block == nearex->ee_block);
1834 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1835 len = len < 0 ? 0 : len;
1836 ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
1837 "move %d from 0x%p to 0x%p\n",
1838 le32_to_cpu(newext->ee_block),
1839 ext4_ext_pblock(newext),
1840 ext4_ext_is_uninitialized(newext),
1841 ext4_ext_get_actual_len(newext),
1842 nearex, len, nearex + 1, nearex + 2);
1843 memmove(nearex + 1, nearex, len);
1844 path[depth].p_ext = nearex;
1847 le16_add_cpu(&eh->eh_entries, 1);
1848 nearex = path[depth].p_ext;
1849 nearex->ee_block = newext->ee_block;
1850 ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
1851 nearex->ee_len = newext->ee_len;
1854 /* try to merge extents to the right */
1855 if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1856 ext4_ext_try_to_merge(inode, path, nearex);
1858 /* try to merge extents to the left */
1860 /* time to correct all indexes above */
1861 err = ext4_ext_correct_indexes(handle, inode, path);
1865 err = ext4_ext_dirty(handle, inode, path + depth);
1869 ext4_ext_drop_refs(npath);
1872 ext4_ext_invalidate_cache(inode);
1876 static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1877 ext4_lblk_t num, ext_prepare_callback func,
1880 struct ext4_ext_path *path = NULL;
1881 struct ext4_ext_cache cbex;
1882 struct ext4_extent *ex;
1883 ext4_lblk_t next, start = 0, end = 0;
1884 ext4_lblk_t last = block + num;
1885 int depth, exists, err = 0;
1887 BUG_ON(func == NULL);
1888 BUG_ON(inode == NULL);
1890 while (block < last && block != EXT_MAX_BLOCKS) {
1892 /* find extent for this block */
1893 down_read(&EXT4_I(inode)->i_data_sem);
1894 path = ext4_ext_find_extent(inode, block, path);
1895 up_read(&EXT4_I(inode)->i_data_sem);
1897 err = PTR_ERR(path);
1902 depth = ext_depth(inode);
1903 if (unlikely(path[depth].p_hdr == NULL)) {
1904 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1908 ex = path[depth].p_ext;
1909 next = ext4_ext_next_allocated_block(path);
1913 /* there is no extent yet, so try to allocate
1914 * all requested space */
1917 } else if (le32_to_cpu(ex->ee_block) > block) {
1918 /* need to allocate space before found extent */
1920 end = le32_to_cpu(ex->ee_block);
1921 if (block + num < end)
1923 } else if (block >= le32_to_cpu(ex->ee_block)
1924 + ext4_ext_get_actual_len(ex)) {
1925 /* need to allocate space after found extent */
1930 } else if (block >= le32_to_cpu(ex->ee_block)) {
1932 * some part of requested space is covered
1936 end = le32_to_cpu(ex->ee_block)
1937 + ext4_ext_get_actual_len(ex);
1938 if (block + num < end)
1944 BUG_ON(end <= start);
1947 cbex.ec_block = start;
1948 cbex.ec_len = end - start;
1951 cbex.ec_block = le32_to_cpu(ex->ee_block);
1952 cbex.ec_len = ext4_ext_get_actual_len(ex);
1953 cbex.ec_start = ext4_ext_pblock(ex);
1956 if (unlikely(cbex.ec_len == 0)) {
1957 EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
1961 err = func(inode, next, &cbex, ex, cbdata);
1962 ext4_ext_drop_refs(path);
1967 if (err == EXT_REPEAT)
1969 else if (err == EXT_BREAK) {
1974 if (ext_depth(inode) != depth) {
1975 /* depth was changed. we have to realloc path */
1980 block = cbex.ec_block + cbex.ec_len;
1984 ext4_ext_drop_refs(path);
1992 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1993 __u32 len, ext4_fsblk_t start)
1995 struct ext4_ext_cache *cex;
1997 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1998 cex = &EXT4_I(inode)->i_cached_extent;
1999 cex->ec_block = block;
2001 cex->ec_start = start;
2002 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2006 * ext4_ext_put_gap_in_cache:
2007 * calculate boundaries of the gap that the requested block fits into
2008 * and cache this gap
2011 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2014 int depth = ext_depth(inode);
2017 struct ext4_extent *ex;
2019 ex = path[depth].p_ext;
2021 /* there is no extent yet, so gap is [0;-] */
2023 len = EXT_MAX_BLOCKS;
2024 ext_debug("cache gap(whole file):");
2025 } else if (block < le32_to_cpu(ex->ee_block)) {
2027 len = le32_to_cpu(ex->ee_block) - block;
2028 ext_debug("cache gap(before): %u [%u:%u]",
2030 le32_to_cpu(ex->ee_block),
2031 ext4_ext_get_actual_len(ex));
2032 } else if (block >= le32_to_cpu(ex->ee_block)
2033 + ext4_ext_get_actual_len(ex)) {
2035 lblock = le32_to_cpu(ex->ee_block)
2036 + ext4_ext_get_actual_len(ex);
2038 next = ext4_ext_next_allocated_block(path);
2039 ext_debug("cache gap(after): [%u:%u] %u",
2040 le32_to_cpu(ex->ee_block),
2041 ext4_ext_get_actual_len(ex),
2043 BUG_ON(next == lblock);
2044 len = next - lblock;
2050 ext_debug(" -> %u:%lu\n", lblock, len);
2051 ext4_ext_put_in_cache(inode, lblock, len, 0);
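	/*
	 * Worked example (editor's illustration): if the file's only
	 * extent covers logical blocks [100, 108) and block 50 is
	 * requested, the gap cached above is lblock 50 with len
	 * 100 - 50 = 50 blocks, so a later lookup of block 60 can be
	 * answered as a hole without walking the tree again.
	 */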
/*
 * ext4_ext_check_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * cache extent pointer.  If the cached extent is a hole,
 * this routine should be used instead of
 * ext4_ext_in_cache if the calling function needs to
 * know the size of the hole.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
2070 static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
2071 struct ext4_ext_cache *ex){
2072 struct ext4_ext_cache *cex;
2073 struct ext4_sb_info *sbi;
2077 * We borrow i_block_reservation_lock to protect i_cached_extent
2079 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2080 cex = &EXT4_I(inode)->i_cached_extent;
2081 sbi = EXT4_SB(inode->i_sb);
2083 /* has cache valid data? */
2084 if (cex->ec_len == 0)
2087 if (in_range(block, cex->ec_block, cex->ec_len)) {
2088 memcpy(ex, cex, sizeof(struct ext4_ext_cache));
2089 ext_debug("%u cached by %u:%u:%llu\n",
2091 cex->ec_block, cex->ec_len, cex->ec_start);
2096 sbi->extent_cache_misses++;
2098 sbi->extent_cache_hits++;
2099 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/*
 * ext4_ext_in_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * extent pointer.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
2117 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2118 struct ext4_extent *ex)
2120 struct ext4_ext_cache cex;
2123 if (ext4_ext_check_cache(inode, block, &cex)) {
2124 ex->ee_block = cpu_to_le32(cex.ec_block);
2125 ext4_ext_store_pblock(ex, cex.ec_start);
2126 ex->ee_len = cpu_to_le16(cex.ec_len);
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only; thus all requests are for
 * the last index in the block only.
 */
2140 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2141 struct ext4_ext_path *path)
2146 /* free index block */
2148 leaf = ext4_idx_pblock(path->p_idx);
2149 if (unlikely(path->p_hdr->eh_entries == 0)) {
2150 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2153 err = ext4_ext_get_access(handle, inode, path);
2156 le16_add_cpu(&path->p_hdr->eh_entries, -1);
2157 err = ext4_ext_dirty(handle, inode, path);
2160 ext_debug("index is empty, remove it, free block %llu\n", leaf);
2161 ext4_free_blocks(handle, inode, NULL, leaf, 1,
2162 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the maximum number of credits needed to insert
 * an extent into the extent tree.
 * When passed the actual path, the caller should calculate credits
 * under i_data_sem.
 */
2173 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2174 struct ext4_ext_path *path)
2177 int depth = ext_depth(inode);
2180 /* probably there is space in leaf? */
2181 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2182 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
			/*
			 * There is some space in the leaf, so no
			 * need to account for the leaf block credit;
			 * bitmaps, block group descriptor blocks,
			 * and other metadata blocks still need to be
			 * considered.
			 */
2192 /* 1 bitmap, 1 block group descriptor */
2193 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2198 return ext4_chunk_trans_blocks(inode, nrblocks);
/*
 * How many index/leaf blocks need to change/allocate to modify nrblocks?
 *
 * if nrblocks fit in a single extent (chunk flag is 1), then
 * in the worst case, each tree level index/leaf needs to be changed
 * if the tree splits due to inserting a new extent, then the old tree
 * index/leaf need to be updated too
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
2212 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2215 int depth = ext_depth(inode);
2225 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2226 struct ext4_extent *ex,
2227 ext4_lblk_t from, ext4_lblk_t to)
2229 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2230 int flags = EXT4_FREE_BLOCKS_FORGET;
2232 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2233 flags |= EXT4_FREE_BLOCKS_METADATA;
2234 #ifdef EXTENTS_STATS
2236 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2237 spin_lock(&sbi->s_ext_stats_lock);
2238 sbi->s_ext_blocks += ee_len;
2239 sbi->s_ext_extents++;
2240 if (ee_len < sbi->s_ext_min)
2241 sbi->s_ext_min = ee_len;
2242 if (ee_len > sbi->s_ext_max)
2243 sbi->s_ext_max = ee_len;
2244 if (ext_depth(inode) > sbi->s_depth_max)
2245 sbi->s_depth_max = ext_depth(inode);
2246 spin_unlock(&sbi->s_ext_stats_lock);
2249 if (from >= le32_to_cpu(ex->ee_block)
2250 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2255 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2256 start = ext4_ext_pblock(ex) + ee_len - num;
2257 ext_debug("free last %u blocks starting %llu\n", num, start);
2258 ext4_free_blocks(handle, inode, NULL, start, num, flags);
2259 } else if (from == le32_to_cpu(ex->ee_block)
2260 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2266 start = ext4_ext_pblock(ex);
2268 ext_debug("free first %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, NULL, start, num, flags);
2272 printk(KERN_INFO "strange request: removal(2) "
2273 "%u-%u from %u:%u\n",
2274 from, to, le32_to_cpu(ex->ee_block), ee_len);
/*
 * ext4_ext_rm_leaf() Removes the extents associated with the
 * blocks appearing between "start" and "end", and splits the extents
 * if "start" and "end" appear in the same extent
 *
 * @handle: The journal handle
 * @inode:  The file's inode
 * @path:   The path to the leaf
 * @start:  The first block to remove
 * @end:    The last block to remove
 */
2292 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2293 struct ext4_ext_path *path, ext4_lblk_t start,
2296 int err = 0, correct_index = 0;
2297 int depth = ext_depth(inode), credits;
2298 struct ext4_extent_header *eh;
2299 ext4_lblk_t a, b, block;
2301 ext4_lblk_t ex_ee_block;
2302 unsigned short ex_ee_len;
2303 unsigned uninitialized = 0;
2304 struct ext4_extent *ex;
2305 struct ext4_map_blocks map;
2307 /* the header must be checked already in ext4_ext_remove_space() */
2308 ext_debug("truncate since %u in leaf\n", start);
2309 if (!path[depth].p_hdr)
2310 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2311 eh = path[depth].p_hdr;
2312 if (unlikely(path[depth].p_hdr == NULL)) {
2313 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2316 /* find where to start removing */
2317 ex = EXT_LAST_EXTENT(eh);
2319 ex_ee_block = le32_to_cpu(ex->ee_block);
2320 ex_ee_len = ext4_ext_get_actual_len(ex);
2322 while (ex >= EXT_FIRST_EXTENT(eh) &&
2323 ex_ee_block + ex_ee_len > start) {
2325 if (ext4_ext_is_uninitialized(ex))
2330 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2331 uninitialized, ex_ee_len);
2332 path[depth].p_ext = ex;
2334 a = ex_ee_block > start ? ex_ee_block : start;
2335 b = ex_ee_block+ex_ee_len - 1 < end ?
2336 ex_ee_block+ex_ee_len - 1 : end;
2338 ext_debug(" border %u:%u\n", a, b);
2340 /* If this extent is beyond the end of the hole, skip it */
2341 if (end <= ex_ee_block) {
2343 ex_ee_block = le32_to_cpu(ex->ee_block);
2344 ex_ee_len = ext4_ext_get_actual_len(ex);
2346 } else if (a != ex_ee_block &&
2347 b != ex_ee_block + ex_ee_len - 1) {
2349 * If this is a truncate, then this condition should
2350 * never happen because at least one of the end points
2351 * needs to be on the edge of the extent.
2353 if (end == EXT_MAX_BLOCKS - 1) {
2354 ext_debug(" bad truncate %u:%u\n",
2362 * else this is a hole punch, so the extent needs to
2363 * be split since neither edge of the hole is on the
2367 map.m_pblk = ext4_ext_pblock(ex);
2368 map.m_lblk = ex_ee_block;
2369 map.m_len = b - ex_ee_block;
2371 err = ext4_split_extent(handle,
2372 inode, path, &map, 0,
2373 EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
2374 EXT4_GET_BLOCKS_PRE_IO);
2379 ex_ee_len = ext4_ext_get_actual_len(ex);
2381 b = ex_ee_block+ex_ee_len - 1 < end ?
2382 ex_ee_block+ex_ee_len - 1 : end;
2384 /* Then remove tail of this extent */
2385 block = ex_ee_block;
2388 } else if (a != ex_ee_block) {
2389 /* remove tail of the extent */
2390 block = ex_ee_block;
2392 } else if (b != ex_ee_block + ex_ee_len - 1) {
2393 /* remove head of the extent */
2395 num = ex_ee_block + ex_ee_len - b;
2398 * If this is a truncate, this condition
2399 * should never happen
2401 if (end == EXT_MAX_BLOCKS - 1) {
2402 ext_debug(" bad truncate %u:%u\n",
2408 /* remove whole extent: excellent! */
2409 block = ex_ee_block;
2411 if (a != ex_ee_block) {
2412 ext_debug(" bad truncate %u:%u\n",
2418 if (b != ex_ee_block + ex_ee_len - 1) {
2419 ext_debug(" bad truncate %u:%u\n",
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
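		/*
		 * Worked example (editor's note, 4 KiB blocks, 32768
		 * blocks per group): a maximal 32768-block extent gives
		 * credits = 7 + 2 * (32768 / 32768) = 9, before the index
		 * and quota additions below.
		 */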
2433 if (ex == EXT_FIRST_EXTENT(eh)) {
2435 credits += (ext_depth(inode)) + 1;
2437 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2439 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2443 err = ext4_ext_get_access(handle, inode, path + depth);
2447 err = ext4_remove_blocks(handle, inode, ex, a, b);
2452 /* this extent is removed; mark slot entirely unused */
2453 ext4_ext_store_pblock(ex, 0);
2454 } else if (block != ex_ee_block) {
2456 * If this was a head removal, then we need to update
2457 * the physical block since it is now at a different
2460 ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a));
2463 ex->ee_block = cpu_to_le32(block);
2464 ex->ee_len = cpu_to_le16(num);
2466 * Do not mark uninitialized if all the blocks in the
2467 * extent have been removed.
2469 if (uninitialized && num)
2470 ext4_ext_mark_uninitialized(ex);
2472 err = ext4_ext_dirty(handle, inode, path + depth);
		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we don't have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		}

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if a truncate on a deeper level happened, it wasn't partial,
	 * so we have to consider the current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
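/*
 * Example of the p_block bookkeeping (illustrative numbers): suppose we
 * descended from an index block when eh_entries was 5, saving
 * p_block = 5.  If the child has since been freed, ext4_ext_rm_idx()
 * dropped eh_entries to 4 != p_block, so the scan keeps moving left;
 * if the deeper truncate stopped partway inside the child, eh_entries
 * is still 5 == p_block and this level is finished.
 */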
static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
				 ext4_lblk_t end)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i, err;

	ext_debug("truncate since %u\n", start);

	/* probably the first extent we're going to free will be the last in the block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

again:
	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from the right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_depth = depth;
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	i = err = 0;
	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					       start, end);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, move to the next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
					   depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * truncate of the leaf below */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}
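/*
 * Sketch of the walk above for a depth-1 tree (illustrative): start at
 * the root (i == 0), pick the rightmost index, read its leaf
 * (i becomes 1), let ext4_ext_rm_leaf() free what lies past @start,
 * pop back to the root, and repeat with the next index to the left
 * until ext4_ext_more_to_rm() reports nothing further to remove.
 */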
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}
/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}
/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}
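/*
 * For illustration (numbers assumed): zeroing out a 7-block extent at
 * physical block 1000 makes sb_issue_zeroout() write zeroes to blocks
 * 1000..1006, so the extent can later be marked initialized without
 * exposing stale on-disk data.
 */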
/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
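/*
 * Illustrative combination (mirroring ext4_split_extent() below): to
 * split an uninitialized extent so that both halves stay uninitialized,
 * falling back to zeroout if the insert hits ENOSPC, a caller passes
 *
 *	EXT4_EXT_MAY_ZEROOUT | EXT4_EXT_MARK_UNINIT1 | EXT4_EXT_MARK_UNINIT2
 */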
/*
 * ext4_split_extent_at() splits an extent at the given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent will be split.
 * @split_flag: indicates if the extent could be zeroed out if the split
 *		fails, and the states (initialized or uninitialized) of the
 *		two new extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
 * states are determined by @split_flag.
 *
 * There are two cases:
 * a> the extent is split into two extents.
 * b> no split is needed; just the state of the extent is changed.
 *
 * return 0 on success.
 */
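/*
 * Worked example (illustrative numbers): splitting the extent
 * [ee_block 100, len 20] at @split = 110 shrinks the original extent
 * to [100, len 10] and inserts a new extent [110, len 10] starting at
 * physical block pblock(ex) + (110 - 100), matching the newblock
 * computation below.
 */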
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	ext_debug("ext4_split_extent_at: inode %lu, logical "
		"block %llu\n", inode->i_ino, (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with,
		 * so we just change the state of the extent, and splitting
		 * is not needed.
		 */
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			ext4_ext_mark_uninitialized(ex);
		else
			ext4_ext_mark_initialized(ex);

		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
			ext4_ext_try_to_merge(inode, path, ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	}
	/* case a */
	memcpy(&orig_ex, ex, sizeof(orig_ex));
	ex->ee_len = cpu_to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNINIT1)
		ext4_ext_mark_uninitialized(ex);

	/*
	 * path may lead to a new leaf, not to the original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
	err = ext4_ext_dirty(handle, inode, path + depth);
	if (err)
		goto fix_extent_len;
	ex2 = &newex;
	ex2->ee_block = cpu_to_le32(split);
	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNINIT2)
		ext4_ext_mark_uninitialized(ex2);

	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_len = cpu_to_le16(ee_len);
		ext4_ext_try_to_merge(inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	} else if (err)
		goto fix_extent_len;

out:
	ext4_ext_show_leaf(inode, path);
	return err;

fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}
/*
 * ext4_split_extent() splits an extent and marks the extent which is
 * covered by @map as @split_flag indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three).
 * There are three possibilities:
 *   a> There is no split required.
 *   b> Split into two extents: the split happens at either end of the extent.
 *   c> Split into three extents: someone is splitting in the middle of the extent.
 */
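/*
 * Ordering note (from the body below): for case c the extent is cut by
 * two calls to ext4_split_extent_at(): first at
 * map->m_lblk + map->m_len (the right boundary), then, after the path
 * has been looked up again, at map->m_lblk (the left boundary).
 */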
static int ext4_split_extent(handle_t *handle,
			      struct inode *inode,
			      struct ext4_ext_path *path,
			      struct ext4_map_blocks *map,
			      int split_flag,
			      int flags)
{
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int uninitialized;
	int split_flag1, flags1;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	uninitialized = ext4_ext_is_uninitialized(ex);
	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
				       EXT4_EXT_MARK_UNINIT2;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	}

	ext4_ext_drop_refs(path);
	path = ext4_ext_find_extent(inode, map->m_lblk, path);
	if (IS_ERR(path))
		return PTR_ERR(path);
	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			split_flag1 |= EXT4_EXT_MARK_UNINIT2;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : map->m_len;
}
#define EXT4_EXT_ZERO_LEN 7
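/*
 * Illustration of the threshold: with EXT4_EXT_ZERO_LEN == 7, an
 * uninitialized extent of at most 2*7 == 14 blocks is zeroed out and
 * marked initialized wholesale (see below) instead of being split,
 * trading a small amount of extra IO for less extent-tree churn.
 */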
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: the entire extent should be initialized.
 *   b> Split into two extents: the write happens at either end of the extent.
 *   c> Split into three extents: someone is writing in the middle of the extent.
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path *path)
{
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int allocated, ee_len, depth;
	int err = 0;
	int split_flag = 0;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);
	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
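	/*
	 * Worked example (illustrative numbers): with 4KB blocks
	 * (s_blocksize_bits == 12) and i_size == 10000 bytes,
	 * eof_block = (10000 + 4095) >> 12 = 3, the first block beyond
	 * EOF; a map with m_lblk = 5, m_len = 4 then raises eof_block
	 * to 9.
	 */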
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (map->m_lblk - ee_block);

	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert the extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
	/* If the extent has fewer than 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
	    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		err = ext4_ext_zeroout(inode, ex);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;
		ext4_ext_mark_initialized(ex);
		ext4_ext_try_to_merge(inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	}
	/*
	 * four cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the first half.
	 * 3. split the extent into two extents, zeroout the second half.
	 * 4. split the extent into two extents without zeroout.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;
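	/*
	 * How the cases map to the branches below (paraphrase): case 3
	 * applies when everything from the write start to the end of the
	 * extent (i.e. "allocated") fits within EXT4_EXT_ZERO_LEN; case 2
	 * when everything from the start of the extent through the write
	 * fits; cases 1 and 4 fall through to ext4_split_extent() with no
	 * zeroing at all.
	 */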
	if (allocated > map->m_len) {
		if (allocated <= EXT4_EXT_ZERO_LEN &&
		    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
			/* case 3 */
			zero_ex.ee_block =
					 cpu_to_le32(map->m_lblk);
			zero_ex.ee_len = cpu_to_le16(allocated);
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			if (err)
				goto out;
			split_map.m_lblk = map->m_lblk;
			split_map.m_len = allocated;
		} else if ((map->m_lblk - ee_block + map->m_len <
			   EXT4_EXT_ZERO_LEN) &&
			   (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
			/* case 2 */
			if (map->m_lblk != ee_block) {
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
							ee_block);
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex);
				if (err)
					goto out;
			}

			split_map.m_lblk = ee_block;
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
		}
	}

	allocated = ext4_split_extent(handle, inode, path,
				      &split_map, split_flag, 0);
	if (allocated < 0)
		err = allocated;

out:
	return err ? err : allocated;
}
/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO needs to write
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the uninitialized
 * extent into multiple initialized/uninitialized extents (up to three).
 * There are three possibilities:
 *   a> There is no split required: the entire extent stays uninitialized.
 *   b> Split into two extents: the write happens at either end of the extent.
 *   c> Split into three extents: someone is writing in the middle of the extent.
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent is split. To prevent ENOSPC from occurring at IO
 * completion, we split the uninitialized extent before the IO is submitted.
 * The uninitialized extent will be split into (at most) three uninitialized
 * extents. After the IO completes, the part that was filled will be converted
 * to initialized by the end_io callback function via
 * ext4_convert_unwritten_extents().
 *
 * Returns the size of the uninitialized extent to be written on success.
 */
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path *path,
					int flags)
{
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;

	ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert the extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
	split_flag |= EXT4_EXT_MARK_UNINIT2;

	flags |= EXT4_GET_BLOCKS_PRE_IO;
	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
						struct inode *inode,
						struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	struct ext4_extent_header *eh;
	int depth;
	int err = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)le32_to_cpu(ex->ee_block),
		ext4_ext_get_actual_len(ex));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;
	for (i = 0; i < count; i++)
		unmap_underlying_metadata(bdev, block + i);
}
/*
 * Handle the EOFBLOCKS_FL flag, clearing it if necessary
 */
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
			      ext4_lblk_t lblk,
			      struct ext4_ext_path *path,
			      unsigned int len)
{
	int i, depth;
	struct ext4_extent_header *eh;
	struct ext4_extent *last_ex;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
		return 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

	if (unlikely(!eh->eh_entries)) {
		EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
				 "EOFBLOCKS_FL set");
		return -EIO;
	}
	last_ex = EXT_LAST_EXTENT(eh);
	/*
	 * We should clear the EOFBLOCKS_FL flag if we are writing the
	 * last block in the last extent in the file. We test this by
	 * first checking to see if the caller to
	 * ext4_ext_get_blocks() was interested in the last block (or
	 * a block beyond the last block) in the current extent. If
	 * this turns out to be false, we can bail out from this
	 * function immediately.
	 */
	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
	    ext4_ext_get_actual_len(last_ex))
		return 0;
	/*
	 * If the caller does appear to be planning to write at or
	 * beyond the end of the current extent, we then test to see
	 * if the current extent is the last extent in the file, by
	 * checking to make sure it was reached via the rightmost node
	 * at each level of the tree.
	 */
	for (i = depth-1; i >= 0; i--)
		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
			return 0;
	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	return ext4_mark_inode_dirty(handle, inode);
}
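/*
 * The rightmost-path check above, illustrated: in a depth-2 tree the
 * last extent of the file is reached only if path[0].p_idx and
 * path[1].p_idx are each the last index in their blocks; any interior
 * position means extents exist further to the right, so EOFBLOCKS_FL
 * must stay set.
 */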
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			struct ext4_ext_path *path, int flags,
			unsigned int allocated, ext4_fsblk_t newblock)
{
	int ret = 0;
	int err = 0;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
		  "block %llu, max_blocks %u, flags %d, allocated %u",
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);
	/* get_block() before submitting the IO, split the extent */
	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
		ret = ext4_split_unwritten_extents(handle, inode, map,
						   path, flags);
		/*
		 * Flag the inode (non aio case) or end_io struct (aio case)
		 * that this IO needs to be converted to written when the IO
		 * is completed
		 */
		if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
			io->flag = EXT4_IO_END_UNWRITTEN;
			atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
		} else
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
		goto out;
	}
	/* IO end_io completed, convert the filled extent to written */
	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
		ret = ext4_convert_unwritten_extents_endio(handle, inode,
							   path);
		if (ret >= 0) {
			ext4_update_inode_fsync_trans(handle, inode, 1);
			err = check_eofblocks_fl(handle, inode, map->m_lblk,