/src/e2fsprogs/lib/ext2fs/block.c
Line | Count | Source |
1 | | /* |
2 | | * block.c --- iterate over all blocks in an inode |
3 | | * |
4 | | * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o. |
5 | | * |
6 | | * %Begin-Header% |
7 | | * This file may be redistributed under the terms of the GNU Library |
8 | | * General Public License, version 2. |
9 | | * %End-Header% |
10 | | */ |
11 | | |
12 | | #include "config.h" |
13 | | #include <stdio.h> |
14 | | #include <string.h> |
15 | | #if HAVE_UNISTD_H |
16 | | #include <unistd.h> |
17 | | #endif |
18 | | |
19 | | #include "ext2_fs.h" |
20 | | #include "ext2fs.h" |
21 | | |
/*
 * Shared state threaded through the indirect-block walkers below.
 * One instance lives on the stack of ext2fs_block_iterate3() per call.
 */
struct block_context {
	ext2_filsys	fs;		/* filesystem being iterated */
	int (*func)(ext2_filsys fs,	/* caller's per-block callback */
		    blk64_t	*blocknr,
		    e2_blkcnt_t	bcount,
		    blk64_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	e2_blkcnt_t	bcount;		/* running logical block count */
	int		bsize;		/* NOTE(review): not referenced in this file — confirm before removing */
	int		flags;		/* BLOCK_FLAG_* options controlling the walk */
	errcode_t	errcode;	/* first error encountered, if any */
	char	*ind_buf;		/* scratch buffer for indirect blocks */
	char	*dind_buf;		/* scratch buffer for doubly-indirect blocks */
	char	*tind_buf;		/* scratch buffer for triply-indirect blocks */
	void	*priv_data;		/* opaque pointer passed through to func */
};
39 | | |
/*
 * If the iteration was opened read-only (BLOCK_FLAG_READ_ONLY) but the
 * callback reported BLOCK_CHANGED, record the violation in ctx->errcode
 * and return immediately from the enclosing function with
 * BLOCK_ABORT | BLOCK_ERROR added to ret.  Note the hidden `return`.
 */
#define check_for_ro_violation_return(ctx, ret)				\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			return ret;					\
		}							\
	} while (0)
49 | | |
/*
 * Same read-only violation check as check_for_ro_violation_return(),
 * but jumps to `label` (for cleanup paths) instead of returning.
 */
#define check_for_ro_violation_goto(ctx, ret, label)			\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			goto label;					\
		}							\
	} while (0)
59 | | |
/*
 * Walk one indirect block: visit the indirect (metadata) block itself
 * before its entries (pre-order) unless BLOCK_FLAG_DEPTH_TRAVERSE asks
 * for post-order or BLOCK_FLAG_DATA_ONLY suppresses it, then invoke the
 * callback on each 32-bit block number the block contains.  If any
 * callback reports BLOCK_CHANGED, the modified block is written back.
 *
 * Returns the accumulated BLOCK_* flags; on failure BLOCK_ERROR is set
 * and ctx->errcode holds the errcode_t.
 */
static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
			     int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;	/* 4-byte block numbers per block */
	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) {
		/* Pre-order visit of the indirect block itself */
		blk64 = *ind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_IND, ref_block,
				   ref_offset, ctx->priv_data);
		*ind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*ind_block || (ret & BLOCK_ABORT)) {
		/* Sparse (or aborted) indirect block still covers `limit` logical blocks */
		ctx->bcount += limit;
		return ret;
	}
	if (*ind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *ind_block < ctx->fs->super->s_first_data_block) {
		/* Block number out of the filesystem's valid range */
		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
					     ctx->ind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->ind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		/* APPEND mode: offer every slot, including zero (sparse) ones */
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			blk64 = *block_nr;
			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			*block_nr = blk64;
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		/* Normal mode: skip sparse entries but still advance offset */
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			if (*block_nr == 0)
				goto skip_sparse;
			blk64 = *block_nr;
			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			*block_nr = blk64;
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
		skip_sparse:
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		/* At least one entry was modified: flush the block to disk */
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
						      ctx->ind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		/* Post-order visit of the indirect block */
		blk64 = *ind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_IND, ref_block,
				    ref_offset, ctx->priv_data);
		*ind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}
148 | | |
/*
 * Walk one doubly-indirect block: optionally visit the metadata block
 * itself (pre- or post-order, see block_iterate_ind), then recurse into
 * each contained indirect block via block_iterate_ind().  Modified
 * entries are written back to disk.
 *
 * Returns the accumulated BLOCK_* flags; on failure BLOCK_ERROR is set
 * and ctx->errcode holds the errcode_t.
 */
static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;	/* 4-byte block numbers per block */
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY))) {
		/* Pre-order visit of the doubly-indirect block itself */
		blk64 = *dind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_DIND, ref_block,
				   ref_offset, ctx->priv_data);
		*dind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*dind_block || (ret & BLOCK_ABORT)) {
		/* Sparse subtree covers limit^2 logical blocks */
		ctx->bcount += limit*limit;
		return ret;
	}
	if (*dind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *dind_block < ctx->fs->super->s_first_data_block) {
		/* Block number out of the filesystem's valid range */
		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
					     ctx->dind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->dind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		/* APPEND mode: descend into every slot, even sparse ones */
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				/* Sparse indirect block: account for its span */
				ctx->bcount += limit;
				continue;
			}
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		/* A child callback changed an entry: flush the block */
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
						      ctx->dind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		/* Post-order visit of the doubly-indirect block */
		blk64 = *dind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_DIND, ref_block,
				    ref_offset, ctx->priv_data);
		*dind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}
234 | | |
/*
 * Walk one triply-indirect block: optionally visit the metadata block
 * itself (pre- or post-order), then recurse into each contained
 * doubly-indirect block via block_iterate_dind().  Modified entries are
 * written back to disk.
 *
 * Returns the accumulated BLOCK_* flags; on failure BLOCK_ERROR is set
 * and ctx->errcode holds the errcode_t.
 */
static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;	/* 4-byte block numbers per block */
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY))) {
		/* Pre-order visit of the triply-indirect block itself */
		blk64 = *tind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_TIND, ref_block,
				   ref_offset, ctx->priv_data);
		*tind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*tind_block || (ret & BLOCK_ABORT)) {
		/* Sparse subtree covers limit^3 blocks; widen before multiplying */
		ctx->bcount += ((unsigned long long) limit)*limit*limit;
		return ret;
	}
	if (*tind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *tind_block < ctx->fs->super->s_first_data_block) {
		/* Block number out of the filesystem's valid range */
		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
					     ctx->tind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->tind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		/* APPEND mode: descend into every slot, even sparse ones */
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				/* Sparse doubly-indirect block: account for its span */
				ctx->bcount += limit*limit;
				continue;
			}
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		/* A child callback changed an entry: flush the block */
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
						      ctx->tind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		/* Post-order visit of the triply-indirect block */
		blk64 = *tind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_TIND, ref_block,
				    ref_offset, ctx->priv_data);
		*tind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}
320 | | |
/*
 * Iterate over every block of an inode, invoking func() on each one.
 *
 * Handles the HURD translator block (when present and not
 * BLOCK_FLAG_DATA_ONLY), extent-mapped inodes (including extending the
 * file past the last extent under BLOCK_FLAG_APPEND), the direct
 * blocks, and the indirect/doubly-indirect/triply-indirect trees.
 *
 * block_buf, if non-NULL, must provide at least 3 * fs->blocksize bytes
 * of scratch space; otherwise scratch is allocated and freed here.
 * If the callback reported BLOCK_CHANGED on the block-map path, the
 * (possibly modified) inode is written back before returning.
 *
 * Returns 0 on success or when the callback aborted without error,
 * otherwise an errcode_t.  Inline-data inodes cannot be iterated and
 * yield EXT2_ET_INLINE_DATA_CANT_ITERATE.
 */
errcode_t ext2fs_block_iterate3(ext2_filsys fs,
				ext2_ino_t ino,
				int	flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk64_t	*blocknr,
					    e2_blkcnt_t	blockcnt,
					    blk64_t	ref_blk,
					    int		ref_offset,
					    void	*priv_data),
				void *priv_data)
{
	int	i;
	int	r, ret = 0;
	struct ext2_inode inode;
	errcode_t	retval;
	struct block_context ctx;
	int	limit;
	blk64_t	blk64;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
	if (ctx.errcode)
		return ctx.errcode;

	/*
	 * An inode with inline data has no blocks over which to
	 * iterate, so return an error code indicating this fact.
	 */
	if (inode.i_flags & EXT4_INLINE_DATA_FL)
		return EXT2_ET_INLINE_DATA_CANT_ITERATE;

	/*
	 * Check to see if we need to limit large files
	 */
	if (flags & BLOCK_FLAG_NO_LARGE) {
		if (!LINUX_S_ISDIR(inode.i_mode) &&
		    (inode.i_size_high != 0))
			return EXT2_ET_FILE_TOO_BIG;
	}

	limit = fs->blocksize >> 2;	/* block numbers per indirect block */

	ctx.fs = fs;
	ctx.func = func;
	ctx.priv_data = priv_data;
	ctx.flags = flags;
	ctx.bcount = 0;
	if (block_buf) {
		ctx.ind_buf = block_buf;	/* caller-supplied scratch */
	} else {
		/* One contiguous allocation split into three scratch buffers */
		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
		if (retval)
			return retval;
	}
	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
	ctx.tind_buf = ctx.dind_buf + fs->blocksize;

	/*
	 * Iterate over the HURD translator block (if present)
	 */
	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
		if (inode.osd1.hurd1.h_i_translator) {
			blk64 = inode.osd1.hurd1.h_i_translator;
			ret |= (*ctx.func)(fs, &blk64,
					   BLOCK_COUNT_TRANSLATOR,
					   0, 0, priv_data);
			inode.osd1.hurd1.h_i_translator = (blk_t) blk64;
			if (ret & BLOCK_ABORT)
				goto abort_exit;
			check_for_ro_violation_goto(&ctx, ret, abort_exit);
		}
	}

	if (inode.i_flags & EXT4_EXTENTS_FL) {
		/* Extent-mapped inode: walk the extent tree instead */
		ext2_extent_handle_t	handle;
		struct ext2fs_extent	extent, next;
		e2_blkcnt_t		blockcnt = 0;
		blk64_t			blk, new_blk;
		int			op = EXT2_EXTENT_ROOT;
		int			uninit;
		unsigned int		j;

		ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
		if (ctx.errcode)
			goto abort_exit;

		while (1) {
			if (op == EXT2_EXTENT_CURRENT)
				ctx.errcode = 0;
			else
				ctx.errcode = ext2fs_extent_get(handle, op,
								&extent);
			if (ctx.errcode) {
				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
					break;
				/* Ran off the end of the extent tree */
				ctx.errcode = 0;
				if (!(flags & BLOCK_FLAG_APPEND))
					break;
			next_block_set:
				/*
				 * APPEND: keep offering the callback a
				 * zero block so it can grow the file,
				 * mapping each block it hands back.
				 */
				blk = 0;
				r = (*ctx.func)(fs, &blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_done);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt++,
						       (blk64_t) blk, 0);
					if (ctx.errcode || (ret & BLOCK_ABORT))
						break;
					if (blk)
						goto next_block_set;
				}
				break;
			}

			op = EXT2_EXTENT_NEXT;
			blk = extent.e_pblk;
			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
				/*
				 * Interior extent-tree node: visit it on
				 * the first pass for breadth-first walks,
				 * or on the second pass for depth-first.
				 */
				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
					continue;
				if ((!(extent.e_flags &
				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
				    ((extent.e_flags &
				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
					ret |= (*ctx.func)(fs, &blk,
							   -1, 0, 0, priv_data);
					if (ret & BLOCK_CHANGED) {
						extent.e_pblk = blk;
						ctx.errcode =
				ext2fs_extent_replace(handle, 0, &extent);
						if (ctx.errcode)
							break;
					}
					if (ret & BLOCK_ABORT)
						break;
				}
				continue;
			}
			uninit = 0;
			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;

			/*
			 * Get the next extent before we start messing
			 * with the current extent
			 */
			retval = ext2fs_extent_get(handle, op, &next);

#if 0
			printf("lblk %llu pblk %llu len %d blockcnt %llu\n",
			       extent.e_lblk, extent.e_pblk,
			       extent.e_len, blockcnt);
#endif
			/* Skip extents entirely before our current position */
			if (extent.e_lblk + extent.e_len <= (blk64_t) blockcnt)
				continue;
			if (extent.e_lblk > (blk64_t) blockcnt)
				blockcnt = extent.e_lblk;	/* hole: jump ahead */
			j = blockcnt - extent.e_lblk;
			blk += j;
			/* Visit every mapped block of this leaf extent */
			for (blockcnt = extent.e_lblk, j = 0;
			     j < extent.e_len;
			     blk++, blockcnt++, j++) {
				new_blk = blk;
				r = (*ctx.func)(fs, &new_blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_done);
				if (r & BLOCK_CHANGED) {
					/* Remap this logical block to the new physical block */
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt,
						       new_blk, uninit);
					if (ctx.errcode)
						goto extent_done;
				}
				if (ret & BLOCK_ABORT)
					goto extent_done;
			}
			if (retval == 0) {
				/* Resume from the extent fetched before remapping */
				extent = next;
				op = EXT2_EXTENT_CURRENT;
			}
		}

	extent_done:
		ext2fs_extent_free(handle);
		ret |= BLOCK_ERROR; /* ctx.errcode is always valid here */
		goto errout;
	}

	/*
	 * Iterate over normal data blocks
	 */
	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
			blk64 = inode.i_block[i];
			ret |= (*ctx.func)(fs, &blk64, ctx.bcount, 0, i,
					   priv_data);
			inode.i_block[i] = (blk_t) blk64;
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}
	check_for_ro_violation_goto(&ctx, ret, abort_exit);
	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
					 0, EXT2_IND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit;	/* unmapped subtree still counts */
	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
					  0, EXT2_DIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit * limit;
	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
					  0, EXT2_TIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	}

abort_exit:
	if (ret & BLOCK_CHANGED) {
		/* A callback changed a block pointer: write the inode back */
		retval = ext2fs_write_inode(fs, ino, &inode);
		if (retval) {
			ret |= BLOCK_ERROR;
			ctx.errcode = retval;
		}
	}
errout:
	if (!block_buf)
		ext2fs_free_mem(&ctx.ind_buf);

	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
}
569 | | |
570 | | /* |
571 | | * Emulate the old ext2fs_block_iterate function! |
572 | | */ |
573 | | |
/*
 * Translation state for ext2fs_block_iterate2(): holds the caller's
 * 32-bit-block-number callback plus its private data so xlate64_func()
 * can bridge to the 64-bit iterate3 interface.
 */
struct xlate64 {
	int (*func)(ext2_filsys fs,	/* legacy 32-bit callback */
		    blk_t	*blocknr,
		    e2_blkcnt_t	blockcnt,
		    blk_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	void *real_private;		/* caller's original priv_data */
};
583 | | |
584 | | static int xlate64_func(ext2_filsys fs, blk64_t *blocknr, |
585 | | e2_blkcnt_t blockcnt, blk64_t ref_blk, |
586 | | int ref_offset, void *priv_data) |
587 | 0 | { |
588 | 0 | struct xlate64 *xl = (struct xlate64 *) priv_data; |
589 | 0 | int ret; |
590 | 0 | blk_t block32 = *blocknr; |
591 | | |
592 | 0 | ret = (*xl->func)(fs, &block32, blockcnt, (blk_t) ref_blk, ref_offset, |
593 | 0 | xl->real_private); |
594 | 0 | *blocknr = block32; |
595 | 0 | return ret; |
596 | 0 | } |
597 | | |
598 | | errcode_t ext2fs_block_iterate2(ext2_filsys fs, |
599 | | ext2_ino_t ino, |
600 | | int flags, |
601 | | char *block_buf, |
602 | | int (*func)(ext2_filsys fs, |
603 | | blk_t *blocknr, |
604 | | e2_blkcnt_t blockcnt, |
605 | | blk_t ref_blk, |
606 | | int ref_offset, |
607 | | void *priv_data), |
608 | | void *priv_data) |
609 | 0 | { |
610 | 0 | struct xlate64 xl; |
611 | |
|
612 | 0 | xl.real_private = priv_data; |
613 | 0 | xl.func = func; |
614 | |
|
615 | 0 | return ext2fs_block_iterate3(fs, ino, flags, block_buf, |
616 | 0 | xlate64_func, &xl); |
617 | 0 | } |
618 | | |
619 | | |
/*
 * Translation state for ext2fs_block_iterate(): holds the oldest-style
 * callback (int block count, no reference-block arguments) plus its
 * private data so xlate_func() can bridge to the iterate2 interface.
 */
struct xlate {
	int (*func)(ext2_filsys	fs,	/* oldest-style callback */
		    blk_t	*blocknr,
		    int		bcount,
		    void	*priv_data);
	void *real_private;		/* caller's original priv_data */
};
627 | | |
628 | | #ifdef __TURBOC__ |
629 | | #pragma argsused |
630 | | #endif |
631 | | static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt, |
632 | | blk_t ref_block EXT2FS_ATTR((unused)), |
633 | | int ref_offset EXT2FS_ATTR((unused)), |
634 | | void *priv_data) |
635 | 0 | { |
636 | 0 | struct xlate *xl = (struct xlate *) priv_data; |
637 | |
|
638 | 0 | return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private); |
639 | 0 | } |
640 | | |
641 | | errcode_t ext2fs_block_iterate(ext2_filsys fs, |
642 | | ext2_ino_t ino, |
643 | | int flags, |
644 | | char *block_buf, |
645 | | int (*func)(ext2_filsys fs, |
646 | | blk_t *blocknr, |
647 | | int blockcnt, |
648 | | void *priv_data), |
649 | | void *priv_data) |
650 | 0 | { |
651 | 0 | struct xlate xl; |
652 | |
|
653 | 0 | xl.real_private = priv_data; |
654 | 0 | xl.func = func; |
655 | |
|
656 | 0 | return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags, |
657 | 0 | block_buf, xlate_func, &xl); |
658 | 0 | } |
659 | | |