blob: d256b51f913d677f4165b8fda0f6d05dfd886108 [file] [log] [blame]
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include "xfs.h"
Nathan Scotta844f452005-11-02 14:38:42 +110019#include "xfs_fs.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include "xfs_types.h"
Nathan Scotta844f452005-11-02 14:38:42 +110021#include "xfs_bit.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include "xfs_log.h"
Nathan Scotta844f452005-11-02 14:38:42 +110023#include "xfs_inum.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
Nathan Scotta844f452005-11-02 14:38:42 +110027#include "xfs_dir2.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include "xfs_dmapi.h"
29#include "xfs_mount.h"
Nathan Scotta844f452005-11-02 14:38:42 +110030#include "xfs_bmap_btree.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
Nathan Scotta844f452005-11-02 14:38:42 +110033#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h"
36#include "xfs_inode.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include "xfs_btree.h"
Christoph Hellwigf5eb8e72008-10-30 16:57:03 +110038#include "xfs_btree_trace.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include "xfs_ialloc.h"
40#include "xfs_alloc.h"
41#include "xfs_error.h"
42
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44/*
45 * Get the data from the pointed-to record.
46 */
47int /* error */
48xfs_alloc_get_rec(
49 xfs_btree_cur_t *cur, /* btree cursor */
50 xfs_agblock_t *bno, /* output: starting block of extent */
51 xfs_extlen_t *len, /* output: length of extent */
52 int *stat) /* output: success/failure */
53{
54 xfs_alloc_block_t *block; /* btree block */
55#ifdef DEBUG
56 int error; /* error return value */
57#endif
58 int ptr; /* record number */
59
60 ptr = cur->bc_ptrs[0];
61 block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
62#ifdef DEBUG
63 if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
64 return error;
65#endif
66 /*
67 * Off the right end or left end, return failure.
68 */
Christoph Hellwig16259e72005-11-02 15:11:25 +110069 if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070070 *stat = 0;
71 return 0;
72 }
73 /*
74 * Point to the record and extract its data.
75 */
76 {
77 xfs_alloc_rec_t *rec; /* record data */
78
79 rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
Christoph Hellwig16259e72005-11-02 15:11:25 +110080 *bno = be32_to_cpu(rec->ar_startblock);
81 *len = be32_to_cpu(rec->ar_blockcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 }
83 *stat = 1;
84 return 0;
85}
86
Linus Torvalds1da177e2005-04-16 15:20:36 -070087
Christoph Hellwig561f7d12008-10-30 16:53:59 +110088STATIC struct xfs_btree_cur *
89xfs_allocbt_dup_cursor(
90 struct xfs_btree_cur *cur)
91{
92 return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
93 cur->bc_private.a.agbp, cur->bc_private.a.agno,
94 cur->bc_btnum);
95}
96
Christoph Hellwig344207c2008-10-30 16:57:16 +110097STATIC void
98xfs_allocbt_set_root(
99 struct xfs_btree_cur *cur,
100 union xfs_btree_ptr *ptr,
101 int inc)
102{
103 struct xfs_buf *agbp = cur->bc_private.a.agbp;
104 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
105 xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
106 int btnum = cur->bc_btnum;
107
108 ASSERT(ptr->s != 0);
109
110 agf->agf_roots[btnum] = ptr->s;
111 be32_add_cpu(&agf->agf_levels[btnum], inc);
112 cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc;
113
114 xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
115}
116
Christoph Hellwigf5eb8e72008-10-30 16:57:03 +1100117STATIC int
118xfs_allocbt_alloc_block(
119 struct xfs_btree_cur *cur,
120 union xfs_btree_ptr *start,
121 union xfs_btree_ptr *new,
122 int length,
123 int *stat)
124{
125 int error;
126 xfs_agblock_t bno;
127
128 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
129
130 /* Allocate the new block from the freelist. If we can't, give up. */
131 error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
132 &bno, 1);
133 if (error) {
134 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
135 return error;
136 }
137
138 if (bno == NULLAGBLOCK) {
139 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
140 *stat = 0;
141 return 0;
142 }
143
144 xfs_trans_agbtree_delta(cur->bc_tp, 1);
145 new->s = cpu_to_be32(bno);
146
147 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
148 *stat = 1;
149 return 0;
150}
151
Christoph Hellwigd4b3a4b2008-10-30 16:57:51 +1100152STATIC int
153xfs_allocbt_free_block(
154 struct xfs_btree_cur *cur,
155 struct xfs_buf *bp)
156{
157 struct xfs_buf *agbp = cur->bc_private.a.agbp;
158 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
159 xfs_agblock_t bno;
160 int error;
161
162 bno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(bp));
163 error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
164 if (error)
165 return error;
166
167 /*
168 * Since blocks move to the free list without the coordination used in
169 * xfs_bmap_finish, we can't allow block to be available for
170 * reallocation and non-transaction writing (user data) until we know
171 * that the transaction that moved it to the free list is permanently
172 * on disk. We track the blocks by declaring these blocks as "busy";
173 * the busy list is maintained on a per-ag basis and each transaction
174 * records which entries should be removed when the iclog commits to
175 * disk. If a busy block is allocated, the iclog is pushed up to the
176 * LSN that freed the block.
177 */
178 xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
179 xfs_trans_agbtree_delta(cur->bc_tp, -1);
180 return 0;
181}
182
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100183/*
184 * Update the longest extent in the AGF
185 */
186STATIC void
187xfs_allocbt_update_lastrec(
188 struct xfs_btree_cur *cur,
189 struct xfs_btree_block *block,
190 union xfs_btree_rec *rec,
191 int ptr,
192 int reason)
193{
194 struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
195 xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
196 __be32 len;
Christoph Hellwig91cca5d2008-10-30 16:58:01 +1100197 int numrecs;
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100198
199 ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);
200
201 switch (reason) {
202 case LASTREC_UPDATE:
203 /*
204 * If this is the last leaf block and it's the last record,
205 * then update the size of the longest extent in the AG.
206 */
207 if (ptr != xfs_btree_get_numrecs(block))
208 return;
209 len = rec->alloc.ar_blockcount;
210 break;
Christoph Hellwig4b22a572008-10-30 16:57:40 +1100211 case LASTREC_INSREC:
212 if (be32_to_cpu(rec->alloc.ar_blockcount) <=
213 be32_to_cpu(agf->agf_longest))
214 return;
215 len = rec->alloc.ar_blockcount;
216 break;
Christoph Hellwig91cca5d2008-10-30 16:58:01 +1100217 case LASTREC_DELREC:
218 numrecs = xfs_btree_get_numrecs(block);
219 if (ptr <= numrecs)
220 return;
221 ASSERT(ptr == numrecs + 1);
222
223 if (numrecs) {
224 xfs_alloc_rec_t *rrp;
225
226 rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
227 len = rrp->ar_blockcount;
228 } else {
229 len = 0;
230 }
231
232 break;
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100233 default:
234 ASSERT(0);
235 return;
236 }
237
238 agf->agf_longest = len;
239 cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
240 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
241}
242
Christoph Hellwigce5e42d2008-10-30 16:55:23 +1100243STATIC int
Christoph Hellwig91cca5d2008-10-30 16:58:01 +1100244xfs_allocbt_get_minrecs(
245 struct xfs_btree_cur *cur,
246 int level)
247{
248 return cur->bc_mp->m_alloc_mnr[level != 0];
249}
250
251STATIC int
Christoph Hellwigce5e42d2008-10-30 16:55:23 +1100252xfs_allocbt_get_maxrecs(
253 struct xfs_btree_cur *cur,
254 int level)
255{
256 return cur->bc_mp->m_alloc_mxr[level != 0];
257}
258
Christoph Hellwigfe033cc2008-10-30 16:56:09 +1100259STATIC void
260xfs_allocbt_init_key_from_rec(
261 union xfs_btree_key *key,
262 union xfs_btree_rec *rec)
263{
264 ASSERT(rec->alloc.ar_startblock != 0);
265
266 key->alloc.ar_startblock = rec->alloc.ar_startblock;
267 key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
268}
269
270STATIC void
Christoph Hellwig4b22a572008-10-30 16:57:40 +1100271xfs_allocbt_init_rec_from_key(
272 union xfs_btree_key *key,
273 union xfs_btree_rec *rec)
274{
275 ASSERT(key->alloc.ar_startblock != 0);
276
277 rec->alloc.ar_startblock = key->alloc.ar_startblock;
278 rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
279}
280
281STATIC void
282xfs_allocbt_init_rec_from_cur(
283 struct xfs_btree_cur *cur,
284 union xfs_btree_rec *rec)
285{
286 ASSERT(cur->bc_rec.a.ar_startblock != 0);
287
288 rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
289 rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
290}
291
292STATIC void
Christoph Hellwigfe033cc2008-10-30 16:56:09 +1100293xfs_allocbt_init_ptr_from_cur(
294 struct xfs_btree_cur *cur,
295 union xfs_btree_ptr *ptr)
296{
297 struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
298
299 ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
300 ASSERT(agf->agf_roots[cur->bc_btnum] != 0);
301
302 ptr->s = agf->agf_roots[cur->bc_btnum];
303}
304
305STATIC __int64_t
306xfs_allocbt_key_diff(
307 struct xfs_btree_cur *cur,
308 union xfs_btree_key *key)
309{
310 xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
311 xfs_alloc_key_t *kp = &key->alloc;
312 __int64_t diff;
313
314 if (cur->bc_btnum == XFS_BTNUM_BNO) {
315 return (__int64_t)be32_to_cpu(kp->ar_startblock) -
316 rec->ar_startblock;
317 }
318
319 diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
320 if (diff)
321 return diff;
322
323 return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
324}
325
Christoph Hellwig91cca5d2008-10-30 16:58:01 +1100326STATIC int
327xfs_allocbt_kill_root(
328 struct xfs_btree_cur *cur,
329 struct xfs_buf *bp,
330 int level,
331 union xfs_btree_ptr *newroot)
332{
333 int error;
334
335 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
336 XFS_BTREE_STATS_INC(cur, killroot);
337
338 /*
339 * Update the root pointer, decreasing the level by 1 and then
340 * free the old root.
341 */
342 xfs_allocbt_set_root(cur, newroot, -1);
343 error = xfs_allocbt_free_block(cur, bp);
344 if (error) {
345 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
346 return error;
347 }
348
349 XFS_BTREE_STATS_INC(cur, free);
350
351 xfs_btree_setbuf(cur, level, NULL);
352 cur->bc_nlevels--;
353
354 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
355 return 0;
356}
357
#ifdef XFS_BTREE_TRACE
ktrace_t	*xfs_allocbt_trace_buf;

/*
 * Record a generic btree trace event in the allocbt trace buffer.
 */
STATIC void
xfs_allocbt_trace_enter(
	struct xfs_btree_cur	*cur,
	const char		*func,
	char			*s,
	int			type,
	int			line,
	__psunsigned_t		a0,
	__psunsigned_t		a1,
	__psunsigned_t		a2,
	__psunsigned_t		a3,
	__psunsigned_t		a4,
	__psunsigned_t		a5,
	__psunsigned_t		a6,
	__psunsigned_t		a7,
	__psunsigned_t		a8,
	__psunsigned_t		a9,
	__psunsigned_t		a10)
{
	ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
		     (void *)func, (void *)s, NULL, (void *)cur,
		     (void *)a0, (void *)a1, (void *)a2, (void *)a3,
		     (void *)a4, (void *)a5, (void *)a6, (void *)a7,
		     (void *)a8, (void *)a9, (void *)a10);
}

/*
 * Summarize the cursor state (AG number and in-core record) for tracing.
 */
STATIC void
xfs_allocbt_trace_cursor(
	struct xfs_btree_cur	*cur,
	__uint32_t		*s0,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*s0 = cur->bc_private.a.agno;
	*l0 = cur->bc_rec.a.ar_startblock;
	*l1 = cur->bc_rec.a.ar_blockcount;
}

/*
 * Decode a btree key into host-endian trace values.
 */
STATIC void
xfs_allocbt_trace_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*l0 = be32_to_cpu(key->alloc.ar_startblock);
	*l1 = be32_to_cpu(key->alloc.ar_blockcount);
}

/*
 * Decode a btree record into host-endian trace values; alloc records
 * have no third field, so *l2 is always zero.
 */
STATIC void
xfs_allocbt_trace_record(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	__uint64_t		*l0,
	__uint64_t		*l1,
	__uint64_t		*l2)
{
	*l0 = be32_to_cpu(rec->alloc.ar_startblock);
	*l1 = be32_to_cpu(rec->alloc.ar_blockcount);
	*l2 = 0;
}
#endif /* XFS_BTREE_TRACE */
423
Christoph Hellwig561f7d12008-10-30 16:53:59 +1100424static const struct xfs_btree_ops xfs_allocbt_ops = {
Christoph Hellwig65f1eae2008-10-30 16:55:34 +1100425 .rec_len = sizeof(xfs_alloc_rec_t),
426 .key_len = sizeof(xfs_alloc_key_t),
427
Christoph Hellwig561f7d12008-10-30 16:53:59 +1100428 .dup_cursor = xfs_allocbt_dup_cursor,
Christoph Hellwig344207c2008-10-30 16:57:16 +1100429 .set_root = xfs_allocbt_set_root,
Christoph Hellwig91cca5d2008-10-30 16:58:01 +1100430 .kill_root = xfs_allocbt_kill_root,
Christoph Hellwigf5eb8e72008-10-30 16:57:03 +1100431 .alloc_block = xfs_allocbt_alloc_block,
Christoph Hellwigd4b3a4b2008-10-30 16:57:51 +1100432 .free_block = xfs_allocbt_free_block,
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100433 .update_lastrec = xfs_allocbt_update_lastrec,
Christoph Hellwig91cca5d2008-10-30 16:58:01 +1100434 .get_minrecs = xfs_allocbt_get_minrecs,
Christoph Hellwigce5e42d2008-10-30 16:55:23 +1100435 .get_maxrecs = xfs_allocbt_get_maxrecs,
Christoph Hellwigfe033cc2008-10-30 16:56:09 +1100436 .init_key_from_rec = xfs_allocbt_init_key_from_rec,
Christoph Hellwig4b22a572008-10-30 16:57:40 +1100437 .init_rec_from_key = xfs_allocbt_init_rec_from_key,
438 .init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
Christoph Hellwigfe033cc2008-10-30 16:56:09 +1100439 .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
440 .key_diff = xfs_allocbt_key_diff,
Christoph Hellwig8c4ed632008-10-30 16:55:13 +1100441
442#ifdef XFS_BTREE_TRACE
443 .trace_enter = xfs_allocbt_trace_enter,
444 .trace_cursor = xfs_allocbt_trace_cursor,
445 .trace_key = xfs_allocbt_trace_key,
446 .trace_record = xfs_allocbt_trace_record,
447#endif
Christoph Hellwig561f7d12008-10-30 16:53:59 +1100448};
449
450/*
451 * Allocate a new allocation btree cursor.
452 */
453struct xfs_btree_cur * /* new alloc btree cursor */
454xfs_allocbt_init_cursor(
455 struct xfs_mount *mp, /* file system mount point */
456 struct xfs_trans *tp, /* transaction pointer */
457 struct xfs_buf *agbp, /* buffer for agf structure */
458 xfs_agnumber_t agno, /* allocation group number */
459 xfs_btnum_t btnum) /* btree identifier */
460{
461 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
462 struct xfs_btree_cur *cur;
463
464 ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
465
466 cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
467
468 cur->bc_tp = tp;
469 cur->bc_mp = mp;
470 cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
471 cur->bc_btnum = btnum;
472 cur->bc_blocklog = mp->m_sb.sb_blocklog;
473
474 cur->bc_ops = &xfs_allocbt_ops;
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100475 if (btnum == XFS_BTNUM_CNT)
476 cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
Christoph Hellwig561f7d12008-10-30 16:53:59 +1100477
478 cur->bc_private.a.agbp = agbp;
479 cur->bc_private.a.agno = agno;
480
481 return cur;
482}