ntfs3 for 6.0
Merge tag 'ntfs3_for_6.0' of https://github.com/Paragon-Software-Group/linux-ntfs3

Pull ntfs3 updates from Konstantin Komarov:

 - implement FALLOC_FL_INSERT_RANGE

 - fix some logic errors

 - fixed xfstests (tested on x86_64): generic/064 generic/213 generic/300
   generic/361 generic/449 generic/485

 - some dead code removed or refactored

* tag 'ntfs3_for_6.0' of https://github.com/Paragon-Software-Group/linux-ntfs3: (39 commits)
  fs/ntfs3: uninitialized variable in ntfs_set_acl_ex()
  fs/ntfs3: Remove unused function wnd_bits
  fs/ntfs3: Make ni_ins_new_attr return error
  fs/ntfs3: Create MFT zone only if length is large enough
  fs/ntfs3: Refactoring attr_insert_range to restore after errors
  fs/ntfs3: Refactoring attr_punch_hole to restore after errors
  fs/ntfs3: Refactoring attr_set_size to restore after errors
  fs/ntfs3: New function ntfs_bad_inode
  fs/ntfs3: Make MFT zone less fragmented
  fs/ntfs3: Check possible errors in run_pack in advance
  fs/ntfs3: Added comments to frecord functions
  fs/ntfs3: Fill duplicate info in ni_add_name
  fs/ntfs3: Make static function attr_load_runs
  fs/ntfs3: Add new argument is_mft to ntfs_mark_rec_free
  fs/ntfs3: Remove unused mi_mark_free
  fs/ntfs3: Fix very fragmented case in attr_punch_hole
  fs/ntfs3: Fix work with fragmented xattr
  fs/ntfs3: Make ntfs_fallocate return -ENOSPC instead of -EFBIG
  fs/ntfs3: extend ni_insert_nonresident to return inserted ATTR_LIST_ENTRY
  fs/ntfs3: Check reserved size for maximum allowed
  ...
commit 3b06a27557
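For context, the headline item above, FALLOC_FL_INSERT_RANGE, is driven from userspace through fallocate(2): the offset and length must both be aligned to the filesystem cluster (frame) size, and the offset must lie inside the current file size. A minimal sketch of exercising the new ntfs3 path follows; the file name and sizes are illustrative assumptions, not taken from this series.

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	/*
	 * Assumes ./test.bin already exists on an ntfs3 mount, is larger
	 * than 4 KiB, and that 4096 matches the volume cluster size.
	 */
	int fd = open("test.bin", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Shift data from offset 4096 forward by 8192 bytes, leaving a hole. */
	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 4096, 8192))
		perror("fallocate(FALLOC_FL_INSERT_RANGE)");

	close(fd);
	return 0;
}
```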
@ -84,8 +84,8 @@ static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
/*
 * attr_load_runs - Load all runs stored in @attr.
 */
int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
struct runs_tree *run, const CLST *vcn)
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
struct runs_tree *run, const CLST *vcn)
{
int err;
CLST svcn = le64_to_cpu(attr->nres.svcn);
@ -140,7 +140,10 @@ failed:
}

if (lcn != SPARSE_LCN) {
mark_as_free_ex(sbi, lcn, clen, trim);
if (sbi) {
/* mark bitmap range [lcn + clen) as free and trim clusters. */
mark_as_free_ex(sbi, lcn, clen, trim);
}
dn += clen;
}

@ -173,7 +176,6 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
{
int err;
CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
size_t cnt = run->count;

for (;;) {
@ -196,9 +198,7 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
/* Add new fragment into run storage. */
if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
/* Undo last 'ntfs_look_for_free_space' */
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
wnd_set_free(wnd, lcn, flen);
up_write(&wnd->rw_lock);
mark_as_free_ex(sbi, lcn, len, false);
err = -ENOMEM;
goto out;
}
@ -320,7 +320,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,

err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
attr_s->name_len, run, 0, alen,
attr_s->flags, &attr, NULL);
attr_s->flags, &attr, NULL, NULL);
if (err)
goto out3;

@ -419,40 +419,44 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
struct mft_inode *mi, *mi_b;
CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
CLST next_svcn, pre_alloc = -1, done = 0;
bool is_ext;
bool is_ext, is_bad = false;
u32 align;
struct MFT_REC *rec;

again:
alen = 0;
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
&mi_b);
if (!attr_b) {
err = -ENOENT;
goto out;
goto bad_inode;
}

if (!attr_b->non_res) {
err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
&attr_b);
if (err || !attr_b->non_res)
goto out;
if (err)
return err;

/* Return if file is still resident. */
if (!attr_b->non_res)
goto ok1;

/* Layout of records may be changed, so do a full search. */
goto again;
}

is_ext = is_attr_ext(attr_b);

again_1:
align = sbi->cluster_size;

if (is_ext)
align <<= attr_b->nres.c_unit;

old_valid = le64_to_cpu(attr_b->nres.valid_size);
old_size = le64_to_cpu(attr_b->nres.data_size);
old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
old_alen = old_alloc >> cluster_bits;

new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
@ -475,24 +479,27 @@ again_1:
mi = mi_b;
} else if (!le_b) {
err = -EINVAL;
goto out;
goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
&mi);
if (!attr) {
err = -EINVAL;
goto out;
goto bad_inode;
}

next_le_1:
svcn = le64_to_cpu(attr->nres.svcn);
evcn = le64_to_cpu(attr->nres.evcn);
}

/*
 * Here we have:
 * attr,mi,le - last attribute segment (containing 'vcn').
 * attr_b,mi_b,le_b - base (primary) attribute segment.
 */
next_le:
rec = mi->mrec;

err = attr_load_runs(attr, ni, run, NULL);
if (err)
goto out;
@ -507,6 +514,13 @@ next_le:
goto ok;
}

/*
 * Add clusters. In simple case we have to:
 * - allocate space (vcn, lcn, len)
 * - update packed run in 'mi'
 * - update attr->nres.evcn
 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
 */
to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
lcn = 0;
@ -520,9 +534,11 @@ add_alloc_in_same_attr_seg:
pre_alloc = 0;
if (type == ATTR_DATA && !name_len &&
sbi->options->prealloc) {
CLST new_alen2 = bytes_to_cluster(
sbi, get_pre_allocated(new_size));
pre_alloc = new_alen2 - new_alen;
pre_alloc =
bytes_to_cluster(
sbi,
get_pre_allocated(new_size)) -
new_alen;
}

/* Get the last LCN to allocate from. */
@ -580,7 +596,7 @@ add_alloc_in_same_attr_seg:
pack_runs:
err = mi_pack_runs(mi, attr, run, vcn - svcn);
if (err)
goto out;
goto undo_1;

next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
new_alloc_tmp = (u64)next_svcn << cluster_bits;
@ -614,7 +630,7 @@ pack_runs:
if (type == ATTR_LIST) {
err = ni_expand_list(ni);
if (err)
goto out;
goto undo_2;
if (next_svcn < vcn)
goto pack_runs;

@ -624,8 +640,9 @@ pack_runs:

if (!ni->attr_list.size) {
err = ni_create_attr_list(ni);
/* In case of error layout of records is not changed. */
if (err)
goto out;
goto undo_2;
/* Layout of records is changed. */
}

@ -637,9 +654,25 @@ pack_runs:
/* Insert new attribute segment. */
err = ni_insert_nonresident(ni, type, name, name_len, run,
next_svcn, vcn - next_svcn,
attr_b->flags, &attr, &mi);
if (err)
goto out;
attr_b->flags, &attr, &mi, NULL);

/*
 * Layout of records maybe changed.
 * Find base attribute to update.
 */
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
NULL, &mi_b);
if (!attr_b) {
err = -EINVAL;
goto bad_inode;
}

if (err) {
/* ni_insert_nonresident failed. */
attr = NULL;
goto undo_2;
}

if (!is_mft)
run_truncate_head(run, evcn + 1);
@ -647,38 +680,31 @@ pack_runs:
svcn = le64_to_cpu(attr->nres.svcn);
evcn = le64_to_cpu(attr->nres.evcn);

le_b = NULL;
/*
 * Layout of records maybe changed.
 * Find base attribute to update.
 * Attribute is in consistency state.
 * Save this point to restore to if next steps fail.
 */
attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
NULL, &mi_b);
if (!attr_b) {
err = -ENOENT;
goto out;
}

attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
attr_b->nres.data_size = attr_b->nres.alloc_size;
attr_b->nres.valid_size = attr_b->nres.alloc_size;
old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
attr_b->nres.valid_size = attr_b->nres.data_size =
attr_b->nres.alloc_size = cpu_to_le64(old_size);
mi_b->dirty = true;
goto again_1;
}

if (new_size != old_size ||
(new_alloc != old_alloc && !keep_prealloc)) {
/*
 * Truncate clusters. In simple case we have to:
 * - update packed run in 'mi'
 * - update attr->nres.evcn
 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
 * - mark and trim clusters as free (vcn, lcn, len)
 */
CLST dlen = 0;

vcn = max(svcn, new_alen);
new_alloc_tmp = (u64)vcn << cluster_bits;

alen = 0;
err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
true);
if (err)
goto out;

run_truncate(run, vcn);

if (vcn > svcn) {
err = mi_pack_runs(mi, attr, run, vcn - svcn);
if (err)
@ -697,7 +723,7 @@ pack_runs:

if (!al_remove_le(ni, le)) {
err = -EINVAL;
goto out;
goto bad_inode;
}

le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
@ -723,13 +749,21 @@ pack_runs:
attr_b->nres.valid_size =
attr_b->nres.alloc_size;
}

if (is_ext)
le64_sub_cpu(&attr_b->nres.total_size,
((u64)alen << cluster_bits));

mi_b->dirty = true;

err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
true);
if (err)
goto out;

if (is_ext) {
/* dlen - really deallocated clusters. */
le64_sub_cpu(&attr_b->nres.total_size,
((u64)dlen << cluster_bits));
}

run_truncate(run, vcn);

if (new_alloc_tmp <= new_alloc)
goto ok;

@ -747,7 +781,7 @@ pack_runs:
if (le->type != type || le->name_len != name_len ||
memcmp(le_name(le), name, name_len * sizeof(short))) {
err = -EINVAL;
goto out;
goto bad_inode;
}

err = ni_load_mi(ni, le, &mi);
@ -757,7 +791,7 @@ pack_runs:
attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
if (!attr) {
err = -EINVAL;
goto out;
goto bad_inode;
}
goto next_le_1;
}
@ -772,13 +806,13 @@ ok:
}
}

out:
if (!err && attr_b && ret)
ok1:
if (ret)
*ret = attr_b;

/* Update inode_set_bytes. */
if (!err && ((type == ATTR_DATA && !name_len) ||
(type == ATTR_ALLOC && name == I30_NAME))) {
if (((type == ATTR_DATA && !name_len) ||
(type == ATTR_ALLOC && name == I30_NAME))) {
bool dirty = false;

if (ni->vfs_inode.i_size != new_size) {
@ -786,7 +820,7 @@ out:
dirty = true;
}

if (attr_b && attr_b->non_res) {
if (attr_b->non_res) {
new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
inode_set_bytes(&ni->vfs_inode, new_alloc);
@ -800,6 +834,47 @@ out:
}
}

return 0;

undo_2:
vcn -= alen;
attr_b->nres.data_size = cpu_to_le64(old_size);
attr_b->nres.valid_size = cpu_to_le64(old_valid);
attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

/* Restore 'attr' and 'mi'. */
if (attr)
goto restore_run;

if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
svcn <= le64_to_cpu(attr_b->nres.evcn)) {
attr = attr_b;
le = le_b;
mi = mi_b;
} else if (!le_b) {
err = -EINVAL;
goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
&svcn, &mi);
if (!attr)
goto bad_inode;
}

restore_run:
if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
is_bad = true;

undo_1:
run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

run_truncate(run, vcn);
out:
if (is_bad) {
bad_inode:
_ntfs_bad_inode(&ni->vfs_inode);
}
return err;
}

@ -855,7 +930,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
goto out;
}

asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
if (vcn >= asize) {
err = -EINVAL;
goto out;
@ -1047,7 +1122,7 @@ ins_ext:
if (evcn1 > next_svcn) {
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
next_svcn, evcn1 - next_svcn,
attr_b->flags, &attr, &mi);
attr_b->flags, &attr, &mi, NULL);
if (err)
goto out;
}
@ -1173,7 +1248,7 @@ int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
{
struct ntfs_sb_info *sbi = ni->mi.sbi;
u8 cluster_bits = sbi->cluster_bits;
CLST vcn = from >> cluster_bits;
CLST vcn;
CLST vcn_last = (to - 1) >> cluster_bits;
CLST lcn, clen;
int err;
@ -1647,7 +1722,7 @@ ins_ext:
if (evcn1 > next_svcn) {
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
next_svcn, evcn1 - next_svcn,
attr_b->flags, &attr, &mi);
attr_b->flags, &attr, &mi, NULL);
if (err)
goto out;
}
@ -1812,18 +1887,12 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
err = ni_insert_nonresident(
ni, ATTR_DATA, NULL, 0, run, next_svcn,
evcn1 - eat - next_svcn, a_flags, &attr,
&mi);
&mi, &le);
if (err)
goto out;

/* Layout of records maybe changed. */
attr_b = NULL;
le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
&next_svcn);
if (!le) {
err = -EINVAL;
goto out;
}
}

/* Free all allocated memory. */
@ -1918,7 +1987,7 @@ next_attr:
out:
up_write(&ni->file.run_lock);
if (err)
make_bad_inode(&ni->vfs_inode);
_ntfs_bad_inode(&ni->vfs_inode);

return err;
}
@ -1936,9 +2005,11 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
struct ATTRIB *attr = NULL, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
CLST svcn, evcn1, vcn, len, end, alen, dealloc;
CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
u64 total_size, alloc_size;
u32 mask;
__le16 a_flags;
struct runs_tree run2;

if (!bytes)
return 0;
@ -1990,6 +2061,9 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
}

down_write(&ni->file.run_lock);
run_init(&run2);
run_truncate(run, 0);

/*
 * Enumerate all attribute segments and punch hole where necessary.
 */
@ -1997,10 +2071,11 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
vcn = vbo >> sbi->cluster_bits;
len = bytes >> sbi->cluster_bits;
end = vcn + len;
dealloc = 0;
hole = 0;

svcn = le64_to_cpu(attr_b->nres.svcn);
evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
a_flags = attr_b->flags;

if (svcn <= vcn && vcn < evcn1) {
attr = attr_b;
@ -2008,14 +2083,14 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
mi = mi_b;
} else if (!le_b) {
err = -EINVAL;
goto out;
goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
&mi);
if (!attr) {
err = -EINVAL;
goto out;
goto bad_inode;
}

svcn = le64_to_cpu(attr->nres.svcn);
@ -2023,49 +2098,91 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
}

while (svcn < end) {
CLST vcn1, zero, dealloc2;
CLST vcn1, zero, hole2 = hole;

err = attr_load_runs(attr, ni, run, &svcn);
if (err)
goto out;
goto done;
vcn1 = max(vcn, svcn);
zero = min(end, evcn1) - vcn1;

dealloc2 = dealloc;
err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
/*
 * Check range [vcn1 + zero).
 * Calculate how many clusters there are.
 * Don't do any destructive actions.
 */
err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
if (err)
goto out;
goto done;

if (dealloc2 == dealloc) {
/* Looks like the required range is already sparsed. */
} else {
if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
false)) {
err = -ENOMEM;
goto out;
}
/* Check if required range is already hole. */
if (hole2 == hole)
goto next_attr;

err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
if (err)
goto out;
/* Make a clone of run to undo. */
err = run_clone(run, &run2);
if (err)
goto done;

/* Make a hole range (sparse) [vcn1 + zero). */
if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
err = -ENOMEM;
goto done;
}

/* Update run in attribute segment. */
err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
if (err)
goto done;
next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
if (next_svcn < evcn1) {
/* Insert new attribute segment. */
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
next_svcn,
evcn1 - next_svcn, a_flags,
&attr, &mi, &le);
if (err)
goto undo_punch;

/* Layout of records maybe changed. */
attr_b = NULL;
}

/* Real deallocate. Should not fail. */
run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);

next_attr:
/* Free all allocated memory. */
run_truncate(run, 0);

if (evcn1 >= alen)
break;

/* Get next attribute segment. */
attr = ni_enum_attr_ex(ni, attr, &le, &mi);
if (!attr) {
err = -EINVAL;
goto out;
goto bad_inode;
}

svcn = le64_to_cpu(attr->nres.svcn);
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
}

total_size -= (u64)dealloc << sbi->cluster_bits;
done:
if (!hole)
goto out;

if (!attr_b) {
attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
&mi_b);
if (!attr_b) {
err = -EINVAL;
goto bad_inode;
}
}

total_size -= (u64)hole << sbi->cluster_bits;
attr_b->nres.total_size = cpu_to_le64(total_size);
mi_b->dirty = true;

@ -2075,9 +2192,263 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
mark_inode_dirty(&ni->vfs_inode);

out:
run_close(&run2);
up_write(&ni->file.run_lock);
return err;

bad_inode:
_ntfs_bad_inode(&ni->vfs_inode);
goto out;

undo_punch:
/*
 * Restore packed runs.
 * 'mi_pack_runs' should not fail, cause we restore original.
 */
if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
goto bad_inode;

goto done;
}

/*
 * attr_insert_range - Insert range (hole) in file.
 * Not for normal files.
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
int err = 0;
struct runs_tree *run = &ni->file.run;
struct ntfs_sb_info *sbi = ni->mi.sbi;
struct ATTRIB *attr = NULL, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
CLST vcn, svcn, evcn1, len, next_svcn;
u64 data_size, alloc_size;
u32 mask;
__le16 a_flags;

if (!bytes)
return 0;

le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
if (!attr_b)
return -ENOENT;

if (!is_attr_ext(attr_b)) {
/* It was checked above. See fallocate. */
return -EOPNOTSUPP;
}

if (!attr_b->non_res) {
data_size = le32_to_cpu(attr_b->res.data_size);
alloc_size = data_size;
mask = sbi->cluster_mask; /* cluster_size - 1 */
} else {
data_size = le64_to_cpu(attr_b->nres.data_size);
alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
}

if (vbo > data_size) {
/* Insert range after the file size is not allowed. */
return -EINVAL;
}

if ((vbo & mask) || (bytes & mask)) {
/* Allow to insert only frame aligned ranges. */
return -EINVAL;
}

/*
 * valid_size <= data_size <= alloc_size
 * Check alloc_size for maximum possible.
 */
if (bytes > sbi->maxbytes_sparse - alloc_size)
return -EFBIG;

vcn = vbo >> sbi->cluster_bits;
len = bytes >> sbi->cluster_bits;

down_write(&ni->file.run_lock);

if (!attr_b->non_res) {
err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
data_size + bytes, NULL, false, NULL);

le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
&mi_b);
if (!attr_b) {
err = -EINVAL;
goto bad_inode;
}

if (err)
goto out;

if (!attr_b->non_res) {
/* Still resident. */
char *data = Add2Ptr(attr_b, attr_b->res.data_off);

memmove(data + bytes, data, bytes);
memset(data, 0, bytes);
goto done;
}

/* Resident files becomes nonresident. */
data_size = le64_to_cpu(attr_b->nres.data_size);
alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
}

/*
 * Enumerate all attribute segments and shift start vcn.
 */
a_flags = attr_b->flags;
svcn = le64_to_cpu(attr_b->nres.svcn);
evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

if (svcn <= vcn && vcn < evcn1) {
attr = attr_b;
le = le_b;
mi = mi_b;
} else if (!le_b) {
err = -EINVAL;
goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
&mi);
if (!attr) {
err = -EINVAL;
goto bad_inode;
}
||||
|
||||
svcn = le64_to_cpu(attr->nres.svcn);
|
||||
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
|
||||
}
|
||||
|
||||
run_truncate(run, 0); /* clear cached values. */
|
||||
err = attr_load_runs(attr, ni, run, NULL);
|
||||
if (err)
|
||||
make_bad_inode(&ni->vfs_inode);
|
||||
goto out;
|
||||
|
||||
if (!run_insert_range(run, vcn, len)) {
err = -ENOMEM;
goto out;
}

/* Try to pack in current record as much as possible. */
err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
if (err)
goto out;

next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
attr->type == ATTR_DATA && !attr->name_len) {
le64_add_cpu(&attr->nres.svcn, len);
le64_add_cpu(&attr->nres.evcn, len);
if (le) {
le->vcn = attr->nres.svcn;
ni->attr_list.dirty = true;
}
mi->dirty = true;
}

if (next_svcn < evcn1 + len) {
err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
next_svcn, evcn1 + len - next_svcn,
a_flags, NULL, NULL, NULL);

le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
&mi_b);
if (!attr_b) {
err = -EINVAL;
goto bad_inode;
}

if (err) {
/* ni_insert_nonresident failed. Try to undo. */
goto undo_insert_range;
}
}

/*
 * Update primary attribute segment.
 */
if (vbo <= ni->i_valid)
ni->i_valid += bytes;

attr_b->nres.data_size = le64_to_cpu(data_size + bytes);
attr_b->nres.alloc_size = le64_to_cpu(alloc_size + bytes);

/* ni->valid may be not equal valid_size (temporary). */
if (ni->i_valid > data_size + bytes)
attr_b->nres.valid_size = attr_b->nres.data_size;
else
attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
mi_b->dirty = true;

done:
ni->vfs_inode.i_size += bytes;
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
mark_inode_dirty(&ni->vfs_inode);

out:
run_truncate(run, 0); /* clear cached values. */

up_write(&ni->file.run_lock);

return err;

bad_inode:
_ntfs_bad_inode(&ni->vfs_inode);
goto out;

undo_insert_range:
svcn = le64_to_cpu(attr_b->nres.svcn);
evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

if (svcn <= vcn && vcn < evcn1) {
attr = attr_b;
le = le_b;
mi = mi_b;
} else if (!le_b) {
goto bad_inode;
} else {
le = le_b;
attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
&mi);
if (!attr) {
goto bad_inode;
}

svcn = le64_to_cpu(attr->nres.svcn);
evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
}

if (attr_load_runs(attr, ni, run, NULL))
goto bad_inode;

if (!run_collapse_range(run, vcn, len))
goto bad_inode;

if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
goto bad_inode;

while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
attr->type == ATTR_DATA && !attr->name_len) {
le64_sub_cpu(&attr->nres.svcn, len);
le64_sub_cpu(&attr->nres.evcn, len);
if (le) {
le->vcn = attr->nres.svcn;
ni->attr_list.dirty = true;
}
mi->dirty = true;
}

goto out;
}
@ -51,11 +51,6 @@ void ntfs3_exit_bitmap(void)
kmem_cache_destroy(ntfs_enode_cachep);
}

static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
{
return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
}

/*
 * wnd_scan
 *
@ -1333,9 +1328,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
if (!new_free)
return -ENOMEM;

if (new_free != wnd->free_bits)
memcpy(new_free, wnd->free_bits,
wnd->nwnd * sizeof(short));
memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
memset(new_free + wnd->nwnd, 0,
(new_wnd - wnd->nwnd) * sizeof(short));
kfree(wnd->free_bits);
@ -1395,9 +1388,8 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)

void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
size_t zlen;
size_t zlen = wnd->zone_end - wnd->zone_bit;

zlen = wnd->zone_end - wnd->zone_bit;
if (zlen)
wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
||||
|
||||
|
110
fs/ntfs3/file.c
110
fs/ntfs3/file.c
@ -530,21 +530,35 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
|
||||
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
|
||||
{
|
||||
struct inode *inode = file->f_mapping->host;
|
||||
struct address_space *mapping = inode->i_mapping;
|
||||
struct super_block *sb = inode->i_sb;
|
||||
struct ntfs_sb_info *sbi = sb->s_fs_info;
|
||||
struct ntfs_inode *ni = ntfs_i(inode);
|
||||
loff_t end = vbo + len;
|
||||
loff_t vbo_down = round_down(vbo, PAGE_SIZE);
|
||||
loff_t i_size;
|
||||
bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
|
||||
loff_t i_size, new_size;
|
||||
bool map_locked;
|
||||
int err;
|
||||
|
||||
/* No support for dir. */
|
||||
if (!S_ISREG(inode->i_mode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Return error if mode is not supported. */
|
||||
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
|
||||
FALLOC_FL_COLLAPSE_RANGE)) {
|
||||
/*
|
||||
* vfs_fallocate checks all possible combinations of mode.
|
||||
* Do additional checks here before ntfs_set_state(dirty).
|
||||
*/
|
||||
if (mode & FALLOC_FL_PUNCH_HOLE) {
|
||||
if (!is_supported_holes)
|
||||
return -EOPNOTSUPP;
|
||||
} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
|
||||
} else if (mode & FALLOC_FL_INSERT_RANGE) {
|
||||
if (!is_supported_holes)
|
||||
return -EOPNOTSUPP;
|
||||
} else if (mode &
|
||||
~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
|
||||
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
|
||||
ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
|
||||
mode);
|
||||
return -EOPNOTSUPP;
|
||||
@ -554,6 +568,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
|
||||
|
||||
inode_lock(inode);
|
||||
i_size = inode->i_size;
|
||||
new_size = max(end, i_size);
|
||||
map_locked = false;
|
||||
|
||||
if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
|
||||
/* Should never be here, see ntfs_file_open. */
|
||||
@ -561,38 +577,27 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
|
||||
FALLOC_FL_INSERT_RANGE)) {
|
||||
inode_dio_wait(inode);
|
||||
filemap_invalidate_lock(mapping);
|
||||
map_locked = true;
|
||||
}
|
||||
|
||||
if (mode & FALLOC_FL_PUNCH_HOLE) {
|
||||
u32 frame_size;
|
||||
loff_t mask, vbo_a, end_a, tmp;
|
||||
|
||||
if (!(mode & FALLOC_FL_KEEP_SIZE)) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = filemap_write_and_wait_range(inode->i_mapping, vbo,
|
||||
end - 1);
|
||||
err = filemap_write_and_wait_range(mapping, vbo, end - 1);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = filemap_write_and_wait_range(inode->i_mapping, end,
|
||||
LLONG_MAX);
|
||||
err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
inode_dio_wait(inode);
|
||||
|
||||
truncate_pagecache(inode, vbo_down);
|
||||
|
||||
if (!is_sparsed(ni) && !is_compressed(ni)) {
|
||||
/*
|
||||
* Normal file, can't make hole.
|
||||
* TODO: Try to find way to save info about hole.
|
||||
*/
|
||||
err = -EOPNOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ni_lock(ni);
|
||||
err = attr_punch_hole(ni, vbo, len, &frame_size);
|
||||
ni_unlock(ni);
|
||||
@ -624,17 +629,11 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
|
||||
ni_unlock(ni);
|
||||
}
|
||||
} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
|
||||
if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write tail of the last page before removed range since
|
||||
* it will get removed from the page cache below.
|
||||
*/
|
||||
err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
|
||||
vbo);
|
||||
err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
@ -642,34 +641,58 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
|
||||
* Write data that will be shifted to preserve them
|
||||
* when discarding page cache below.
|
||||
*/
|
||||
err = filemap_write_and_wait_range(inode->i_mapping, end,
|
||||
LLONG_MAX);
|
||||
err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* Wait for existing dio to complete. */
|
||||
inode_dio_wait(inode);
|
||||
|
||||
truncate_pagecache(inode, vbo_down);
|
||||
|
||||
ni_lock(ni);
|
||||
err = attr_collapse_range(ni, vbo, len);
|
||||
ni_unlock(ni);
|
||||
} else if (mode & FALLOC_FL_INSERT_RANGE) {
|
||||
/* Check new size. */
|
||||
err = inode_newsize_ok(inode, new_size);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* Write out all dirty pages. */
|
||||
err = filemap_write_and_wait_range(mapping, vbo_down,
|
||||
LLONG_MAX);
|
||||
if (err)
|
||||
goto out;
|
||||
truncate_pagecache(inode, vbo_down);
|
||||
|
||||
ni_lock(ni);
|
||||
err = attr_insert_range(ni, vbo, len);
|
||||
ni_unlock(ni);
|
||||
} else {
|
||||
/*
|
||||
* Normal file: Allocate clusters, do not change 'valid' size.
|
||||
*/
|
||||
loff_t new_size = max(end, i_size);
|
||||
/* Check new size. */
|
||||
|
||||
/* generic/213: expected -ENOSPC instead of -EFBIG. */
|
||||
if (!is_supported_holes) {
|
||||
loff_t to_alloc = new_size - inode_get_bytes(inode);
|
||||
|
||||
if (to_alloc > 0 &&
|
||||
(to_alloc >> sbi->cluster_bits) >
|
||||
wnd_zeroes(&sbi->used.bitmap)) {
|
||||
err = -ENOSPC;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
err = inode_newsize_ok(inode, new_size);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Allocate clusters, do not change 'valid' size.
|
||||
*/
|
||||
err = ntfs_set_size(inode, new_size);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (is_sparsed(ni) || is_compressed(ni)) {
|
||||
if (is_supported_holes) {
|
||||
CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
|
||||
CLST vcn = vbo >> sbi->cluster_bits;
|
||||
CLST cend = bytes_to_cluster(sbi, end);
|
||||
@ -717,8 +740,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
|
||||
}
|
||||
|
||||
out:
|
||||
if (err == -EFBIG)
|
||||
err = -ENOSPC;
|
||||
if (map_locked)
|
||||
filemap_invalidate_unlock(mapping);
|
||||
|
||||
if (!err) {
|
||||
inode->i_ctime = inode->i_mtime = current_time(inode);
|
||||
@ -989,7 +1012,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
|
||||
if (bytes > count)
|
||||
bytes = count;
|
||||
|
||||
frame = pos >> frame_bits;
|
||||
frame_vbo = pos & ~(frame_size - 1);
|
||||
index = frame_vbo >> PAGE_SHIFT;
|
||||
|
||||
|
@ -7,6 +7,7 @@
|
||||
|
||||
#include <linux/fiemap.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/minmax.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include "debug.h"
|
||||
@ -468,7 +469,7 @@ ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi,
|
||||
&ref, &le);
|
||||
if (err) {
|
||||
/* No memory or no space. */
|
||||
return NULL;
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
le_added = true;
|
||||
|
||||
@ -649,6 +650,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
|
||||
struct mft_inode *mi;
|
||||
u32 asize, free;
|
||||
struct MFT_REF ref;
|
||||
struct MFT_REC *mrec;
|
||||
__le16 id;
|
||||
|
||||
if (!ni->attr_list.dirty)
|
||||
@ -692,11 +694,17 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
|
||||
free -= asize;
|
||||
}
|
||||
|
||||
/* Make a copy of primary record to restore if error. */
|
||||
mrec = kmemdup(ni->mi.mrec, sbi->record_size, GFP_NOFS);
|
||||
if (!mrec)
|
||||
return 0; /* Not critical. */
|
||||
|
||||
/* It seems that attribute list can be removed from primary record. */
|
||||
mi_remove_attr(NULL, &ni->mi, attr_list);
|
||||
|
||||
/*
|
||||
* Repeat the cycle above and move all attributes to primary record.
|
||||
* Repeat the cycle above and copy all attributes to primary record.
|
||||
* Do not remove original attributes from subrecords!
|
||||
* It should be success!
|
||||
*/
|
||||
le = NULL;
|
||||
@ -707,14 +715,14 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
|
||||
mi = ni_find_mi(ni, ino_get(&le->ref));
|
||||
if (!mi) {
|
||||
/* Should never happened, 'cause already checked. */
|
||||
goto bad;
|
||||
goto out;
|
||||
}
|
||||
|
||||
attr = mi_find_attr(mi, NULL, le->type, le_name(le),
|
||||
le->name_len, &le->id);
|
||||
if (!attr) {
|
||||
/* Should never happened, 'cause already checked. */
|
||||
goto bad;
|
||||
goto out;
|
||||
}
|
||||
asize = le32_to_cpu(attr->size);
|
||||
|
||||
@ -724,18 +732,33 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
|
||||
le16_to_cpu(attr->name_off));
|
||||
if (!attr_ins) {
|
||||
/*
|
||||
* Internal error.
|
||||
* Either no space in primary record (already checked).
|
||||
* Either tried to insert another
|
||||
* non indexed attribute (logic error).
|
||||
* No space in primary record (already checked).
|
||||
*/
|
||||
goto bad;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Copy all except id. */
|
||||
id = attr_ins->id;
|
||||
memcpy(attr_ins, attr, asize);
|
||||
attr_ins->id = id;
|
||||
}
|
||||
|
||||
/*
|
||||
* Repeat the cycle above and remove all attributes from subrecords.
|
||||
*/
|
||||
le = NULL;
|
||||
while ((le = al_enumerate(ni, le))) {
|
||||
if (!memcmp(&le->ref, &ref, sizeof(ref)))
|
||||
continue;
|
||||
|
||||
mi = ni_find_mi(ni, ino_get(&le->ref));
|
||||
if (!mi)
|
||||
continue;
|
||||
|
||||
attr = mi_find_attr(mi, NULL, le->type, le_name(le),
|
||||
le->name_len, &le->id);
|
||||
if (!attr)
|
||||
continue;
|
||||
|
||||
/* Remove from original record. */
|
||||
mi_remove_attr(NULL, mi, attr);
|
||||
@ -748,11 +771,13 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
|
||||
ni->attr_list.le = NULL;
|
||||
ni->attr_list.dirty = false;
|
||||
|
||||
kfree(mrec);
|
||||
return 0;
|
||||
out:
|
||||
/* Restore primary record. */
|
||||
swap(mrec, ni->mi.mrec);
|
||||
kfree(mrec);
|
||||
return 0;
|
||||
bad:
|
||||
ntfs_inode_err(&ni->vfs_inode, "Internal error");
|
||||
make_bad_inode(&ni->vfs_inode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -986,6 +1011,8 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
|
||||
name_off, svcn, ins_le);
|
||||
if (!attr)
|
||||
continue;
|
||||
if (IS_ERR(attr))
|
||||
return PTR_ERR(attr);
|
||||
|
||||
if (ins_attr)
|
||||
*ins_attr = attr;
|
||||
@ -1007,8 +1034,15 @@ insert_ext:
|
||||
|
||||
attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
|
||||
name_off, svcn, ins_le);
|
||||
if (!attr)
|
||||
if (!attr) {
|
||||
err = -EINVAL;
|
||||
goto out2;
|
||||
}
|
||||
|
||||
if (IS_ERR(attr)) {
|
||||
err = PTR_ERR(attr);
|
||||
goto out2;
|
||||
}
|
||||
|
||||
if (ins_attr)
|
||||
*ins_attr = attr;
|
||||
@ -1020,10 +1054,9 @@ insert_ext:
|
||||
out2:
|
||||
ni_remove_mi(ni, mi);
|
||||
mi_put(mi);
|
||||
err = -EINVAL;
|
||||
|
||||
out1:
|
||||
ntfs_mark_rec_free(sbi, rno);
|
||||
ntfs_mark_rec_free(sbi, rno, is_mft);
|
||||
|
||||
out:
|
||||
return err;
|
||||
@ -1076,6 +1109,11 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
|
||||
if (asize <= free) {
|
||||
attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
|
||||
asize, name_off, svcn, ins_le);
|
||||
if (IS_ERR(attr)) {
|
||||
err = PTR_ERR(attr);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (attr) {
|
||||
if (ins_attr)
|
||||
*ins_attr = attr;
|
||||
@ -1173,6 +1211,11 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (IS_ERR(attr)) {
|
||||
err = PTR_ERR(attr);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ins_attr)
|
||||
*ins_attr = attr;
|
||||
if (ins_mi)
|
||||
@ -1218,7 +1261,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
|
||||
mft_min = mft_new;
|
||||
mi_min = mi_new;
|
||||
} else {
|
||||
ntfs_mark_rec_free(sbi, mft_new);
|
||||
ntfs_mark_rec_free(sbi, mft_new, true);
|
||||
mft_new = 0;
|
||||
ni_remove_mi(ni, mi_new);
|
||||
}
|
||||
@ -1262,7 +1305,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
|
||||
done = asize - run_size - SIZEOF_NONRESIDENT;
|
||||
le32_sub_cpu(&ni->mi.mrec->used, done);
|
||||
|
||||
/* Estimate the size of second part: run_buf=NULL. */
|
||||
/* Estimate packed size (run_buf=NULL). */
|
||||
err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
|
||||
&plen);
|
||||
if (err < 0)
|
||||
@ -1288,10 +1331,16 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (IS_ERR(attr)) {
|
||||
err = PTR_ERR(attr);
|
||||
goto out;
|
||||
}
|
||||
|
||||
attr->non_res = 1;
|
||||
attr->name_off = SIZEOF_NONRESIDENT_LE;
|
||||
attr->flags = 0;
|
||||
|
||||
/* This function can't fail - cause already checked above. */
|
||||
run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
|
||||
run_size, &plen);
|
||||
|
||||
@ -1301,7 +1350,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni)
|
||||
|
||||
out:
|
||||
if (mft_new) {
|
||||
ntfs_mark_rec_free(sbi, mft_new);
|
||||
ntfs_mark_rec_free(sbi, mft_new, true);
|
||||
ni_remove_mi(ni, mi_new);
|
||||
}
|
||||
|
||||
@ -1367,8 +1416,6 @@ int ni_expand_list(struct ntfs_inode *ni)
|
||||
|
||||
/* Split MFT data as much as possible. */
|
||||
err = ni_expand_mft_list(ni);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
out:
|
||||
return !err && !done ? -EOPNOTSUPP : err;
|
||||
@ -1381,7 +1428,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
|
||||
const __le16 *name, u8 name_len,
|
||||
const struct runs_tree *run, CLST svcn, CLST len,
|
||||
__le16 flags, struct ATTRIB **new_attr,
|
||||
struct mft_inode **mi)
|
||||
struct mft_inode **mi, struct ATTR_LIST_ENTRY **le)
|
||||
{
|
||||
int err;
|
||||
CLST plen;
|
||||
@ -1394,6 +1441,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
|
||||
u32 run_size, asize;
|
||||
struct ntfs_sb_info *sbi = ni->mi.sbi;
|
||||
|
||||
/* Estimate packed size (run_buf=NULL). */
|
||||
err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
|
||||
&plen);
|
||||
if (err < 0)
|
||||
@ -1414,7 +1462,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
|
||||
}
|
||||
|
||||
err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
|
||||
&attr, mi, NULL);
|
||||
&attr, mi, le);
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
@ -1423,12 +1471,12 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
|
||||
attr->name_off = cpu_to_le16(name_off);
|
||||
attr->flags = flags;
|
||||
|
||||
/* This function can't fail - cause already checked above. */
|
||||
run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);
|
||||
|
||||
attr->nres.svcn = cpu_to_le64(svcn);
|
||||
attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);
|
||||
|
||||
err = 0;
|
||||
if (new_attr)
|
||||
*new_attr = attr;
|
||||
|
||||
@ -1560,7 +1608,7 @@ int ni_delete_all(struct ntfs_inode *ni)
|
||||
mi->dirty = true;
|
||||
mi_write(mi, 0);
|
||||
|
||||
ntfs_mark_rec_free(sbi, mi->rno);
|
||||
ntfs_mark_rec_free(sbi, mi->rno, false);
|
||||
ni_remove_mi(ni, mi);
|
||||
mi_put(mi);
|
||||
node = next;
|
||||
@ -1571,7 +1619,7 @@ int ni_delete_all(struct ntfs_inode *ni)
|
||||
ni->mi.dirty = true;
|
||||
err = mi_write(&ni->mi, 0);
|
||||
|
||||
ntfs_mark_rec_free(sbi, ni->mi.rno);
|
||||
ntfs_mark_rec_free(sbi, ni->mi.rno, false);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -1589,7 +1637,8 @@ struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
|
||||
struct ATTRIB *attr = NULL;
|
||||
struct ATTR_FILE_NAME *fname;
|
||||
|
||||
*le = NULL;
|
||||
if (le)
|
||||
*le = NULL;
|
||||
|
||||
/* Enumerate all names. */
|
||||
next:
|
||||
@ -1605,7 +1654,7 @@ next:
|
||||
goto next;
|
||||
|
||||
if (!uni)
|
||||
goto next;
|
||||
return fname;
|
||||
|
||||
if (uni->len != fname->name_len)
|
||||
goto next;
|
||||
@ -2302,10 +2351,8 @@ remove_wof:
|
||||
|
||||
out:
|
||||
kfree(pages);
|
||||
if (err) {
|
||||
make_bad_inode(inode);
|
||||
ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
|
||||
}
|
||||
if (err)
|
||||
_ntfs_bad_inode(inode);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -2944,7 +2991,7 @@ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
|
||||
}
|
||||
|
||||
/*
|
||||
* ni_add_name - Add new name in MFT and in directory.
|
||||
* ni_add_name - Add new name into MFT and into directory.
|
||||
*/
|
||||
int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
|
||||
struct NTFS_DE *de)
|
||||
@ -2953,13 +3000,20 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
|
||||
struct ATTRIB *attr;
|
||||
struct ATTR_LIST_ENTRY *le;
|
||||
struct mft_inode *mi;
|
||||
struct ATTR_FILE_NAME *fname;
|
||||
struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1);
|
||||
u16 de_key_size = le16_to_cpu(de->key_size);
|
||||
|
||||
mi_get_ref(&ni->mi, &de->ref);
|
||||
mi_get_ref(&dir_ni->mi, &de_name->home);
|
||||
|
||||
/* Insert new name in MFT. */
|
||||
/* Fill duplicate from any ATTR_NAME. */
|
||||
fname = ni_fname_name(ni, NULL, NULL, NULL, NULL);
|
||||
if (fname)
|
||||
memcpy(&de_name->dup, &fname->dup, sizeof(fname->dup));
|
||||
de_name->dup.fa = ni->std_fa;
|
||||
|
||||
/* Insert new name into MFT. */
|
||||
err = ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, &attr,
|
||||
&mi, &le);
|
||||
if (err)
|
||||
@ -2967,7 +3021,7 @@ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
|
||||
|
||||
memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de_name, de_key_size);
|
||||
|
||||
/* Insert new name in directory. */
|
||||
/* Insert new name into directory. */
|
||||
err = indx_insert_entry(&dir_ni->dir, dir_ni, de, ni->mi.sbi, NULL, 0);
|
||||
if (err)
|
||||
ni_remove_attr_le(ni, attr, mi, le);
|
||||
@ -2991,7 +3045,7 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
|
||||
* 1) Add new name and remove old name.
|
||||
* 2) Remove old name and add new name.
|
||||
*
|
||||
* In most cases (not all!) adding new name in MFT and in directory can
|
||||
* In most cases (not all!) adding new name into MFT and into directory can
|
||||
* allocate additional cluster(s).
|
||||
* Second way may result to bad inode if we can't add new name
|
||||
* and then can't restore (add) old name.
|
||||
@ -3261,7 +3315,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
|
||||
err = err2;
|
||||
|
||||
if (is_empty) {
|
||||
ntfs_mark_rec_free(sbi, mi->rno);
|
||||
ntfs_mark_rec_free(sbi, mi->rno, false);
|
||||
rb_erase(node, &ni->mi_tree);
|
||||
mi_put(mi);
|
||||
}
|
||||
|
@ -3843,6 +3843,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
|
||||
|
||||
memset(&rst_info2, 0, sizeof(struct restart_info));
|
||||
err = log_read_rst(log, l_size, false, &rst_info2);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* Determine which restart area to use. */
|
||||
if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
|
||||
@ -5057,7 +5059,7 @@ undo_action_next:
|
||||
goto add_allocated_vcns;
|
||||
|
||||
vcn = le64_to_cpu(lrh->target_vcn);
|
||||
vcn &= ~(log->clst_per_page - 1);
|
||||
vcn &= ~(u64)(log->clst_per_page - 1);
|
||||
|
||||
add_allocated_vcns:
|
||||
for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
|
||||
|
@ -703,12 +703,14 @@ out:
|
||||
|
||||
/*
|
||||
* ntfs_mark_rec_free - Mark record as free.
|
||||
* is_mft - true if we are changing MFT
|
||||
*/
|
||||
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
|
||||
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
|
||||
{
|
||||
struct wnd_bitmap *wnd = &sbi->mft.bitmap;
|
||||
|
||||
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
|
||||
if (!is_mft)
|
||||
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
|
||||
if (rno >= wnd->nbits)
|
||||
goto out;
|
||||
|
||||
@ -727,7 +729,8 @@ void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
|
||||
sbi->mft.next_free = rno;
|
||||
|
||||
out:
|
||||
up_write(&wnd->rw_lock);
|
||||
if (!is_mft)
|
||||
up_write(&wnd->rw_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -780,7 +783,7 @@ out:
|
||||
*/
|
||||
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
|
||||
{
|
||||
CLST zone_limit, zone_max, lcn, vcn, len;
|
||||
CLST lcn, vcn, len;
|
||||
size_t lcn_s, zlen;
|
||||
struct wnd_bitmap *wnd = &sbi->used.bitmap;
|
||||
struct ntfs_inode *ni = sbi->mft.ni;
|
||||
@ -789,16 +792,6 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
|
||||
if (wnd_zone_len(wnd))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Compute the MFT zone at two steps.
|
||||
* It would be nice if we are able to allocate 1/8 of
|
||||
* total clusters for MFT but not more then 512 MB.
|
||||
*/
|
||||
zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
|
||||
zone_max = wnd->nbits >> 3;
|
||||
if (zone_max > zone_limit)
|
||||
zone_max = zone_limit;
|
||||
|
||||
vcn = bytes_to_cluster(sbi,
|
||||
(u64)sbi->mft.bitmap.nbits << sbi->record_bits);
|
||||
|
||||
@ -812,13 +805,7 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
|
||||
lcn_s = lcn + 1;
|
||||
|
||||
/* Try to allocate clusters after last MFT run. */
|
||||
zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
|
||||
if (!zlen) {
|
||||
ntfs_notice(sbi->sb, "MftZone: unavailable");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Truncate too large zone. */
|
||||
zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
|
||||
wnd_zone_set(wnd, lcn_s, zlen);
|
||||
|
||||
return 0;
|
||||
@ -827,16 +814,21 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
|
||||
/*
|
||||
* ntfs_update_mftmirr - Update $MFTMirr data.
|
||||
*/
|
||||
int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
|
||||
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
|
||||
{
|
||||
int err;
|
||||
struct super_block *sb = sbi->sb;
|
||||
u32 blocksize = sb->s_blocksize;
|
||||
u32 blocksize;
|
||||
sector_t block1, block2;
|
||||
u32 bytes;
|
||||
|
||||
if (!sb)
|
||||
return;
|
||||
|
||||
blocksize = sb->s_blocksize;
|
||||
|
||||
if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
|
||||
return 0;
|
||||
return;
|
||||
|
||||
err = 0;
|
||||
bytes = sbi->mft.recs_mirr << sbi->record_bits;
|
||||
@ -847,16 +839,13 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
|
||||
struct buffer_head *bh1, *bh2;
|
||||
|
||||
bh1 = sb_bread(sb, block1++);
|
||||
if (!bh1) {
|
||||
err = -EIO;
|
||||
goto out;
|
||||
}
|
||||
if (!bh1)
|
||||
return;
|
||||
|
||||
bh2 = sb_getblk(sb, block2++);
|
||||
if (!bh2) {
|
||||
put_bh(bh1);
|
||||
err = -EIO;
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
if (buffer_locked(bh2))
|
||||
@ -876,13 +865,24 @@ int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
|
||||
|
||||
put_bh(bh2);
|
||||
if (err)
|
||||
goto out;
|
||||
return;
|
||||
}
|
||||
|
||||
sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
|
||||
}
|
||||
|
||||
out:
|
||||
return err;
|
||||
/*
|
||||
* ntfs_bad_inode
|
||||
*
|
||||
* Marks inode as bad and marks fs as 'dirty'
|
||||
*/
|
||||
void ntfs_bad_inode(struct inode *inode, const char *hint)
|
||||
{
|
||||
struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
|
||||
|
||||
ntfs_inode_err(inode, "%s", hint);
|
||||
make_bad_inode(inode);
|
||||
ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1395,7 +1395,7 @@ int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
|
||||
if (buffer_locked(bh))
|
||||
__wait_on_buffer(bh);
|
||||
|
||||
lock_buffer(nb->bh[idx]);
|
||||
lock_buffer(bh);
|
||||
|
||||
bh_data = bh->b_data + off;
|
||||
end_data = Add2Ptr(bh_data, op);
|
||||
@ -2424,7 +2424,7 @@ static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
|
||||
|
||||
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
|
||||
{
|
||||
CLST end, i;
|
||||
CLST end, i, zone_len, zlen;
|
||||
struct wnd_bitmap *wnd = &sbi->used.bitmap;
|
||||
|
||||
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
|
||||
@ -2459,6 +2459,28 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
|
||||
ntfs_unmap_and_discard(sbi, lcn, len);
|
||||
wnd_set_free(wnd, lcn, len);
|
||||
|
||||
/* append to MFT zone, if possible. */
|
||||
zone_len = wnd_zone_len(wnd);
|
||||
zlen = min(zone_len + len, sbi->zone_max);
|
||||
|
||||
if (zlen == zone_len) {
|
||||
/* MFT zone already has maximum size. */
|
||||
} else if (!zone_len) {
|
||||
/* Create MFT zone only if 'zlen' is large enough. */
|
||||
if (zlen == sbi->zone_max)
|
||||
wnd_zone_set(wnd, lcn, zlen);
|
||||
} else {
|
||||
CLST zone_lcn = wnd_zone_bit(wnd);
|
||||
|
||||
if (lcn + len == zone_lcn) {
|
||||
/* Append into head MFT zone. */
|
||||
wnd_zone_set(wnd, lcn, zlen);
|
||||
} else if (zone_lcn + zone_len == lcn) {
|
||||
/* Append into tail MFT zone. */
|
||||
wnd_zone_set(wnd, zone_lcn, zlen);
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
up_write(&wnd->rw_lock);
|
||||
}
|
||||
|
@ -1042,19 +1042,16 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
{
	int err;
	struct NTFS_DE *e;
	const struct INDEX_HDR *hdr;
	struct indx_node *node;

	if (!root)
		root = indx_get_root(&ni->dir, ni, NULL, NULL);

	if (!root) {
		err = -EINVAL;
		goto out;
		/* Should not happen. */
		return -EINVAL;
	}

	hdr = &root->ihdr;

	/* Check cache. */
	e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de;
	if (e && !de_is_last(e) &&

@ -1068,39 +1065,35 @@ int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
	fnd_clear(fnd);

	/* Lookup entry that is <= to the search value. */
	e = hdr_find_e(indx, hdr, key, key_len, ctx, diff);
	e = hdr_find_e(indx, &root->ihdr, key, key_len, ctx, diff);
	if (!e)
		return -EINVAL;

	fnd->root_de = e;
	err = 0;

	for (;;) {
		node = NULL;
		if (*diff >= 0 || !de_has_vcn_ex(e)) {
			*entry = e;
			goto out;
		}
		if (*diff >= 0 || !de_has_vcn_ex(e))
			break;

		/* Read next level. */
		err = indx_read(indx, ni, de_get_vbn(e), &node);
		if (err)
			goto out;
			return err;

		/* Lookup entry that is <= to the search value. */
		e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
			       diff);
		if (!e) {
			err = -EINVAL;
			put_indx_node(node);
			goto out;
			return -EINVAL;
		}

		fnd_push(fnd, node, e);
	}

out:
	return err;
	*entry = e;
	return 0;
}

int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,

@ -1354,7 +1347,7 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
		goto out;

	err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len,
				    &run, 0, len, 0, &alloc, NULL);
				    &run, 0, len, 0, &alloc, NULL, NULL);
	if (err)
		goto out1;

@ -1685,8 +1678,8 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
{
	int err;
	const struct NTFS_DE *sp;
	struct NTFS_DE *e, *de_t, *up_e = NULL;
	struct indx_node *n2 = NULL;
	struct NTFS_DE *e, *de_t, *up_e;
	struct indx_node *n2;
	struct indx_node *n1 = fnd->nodes[level];
	struct INDEX_HDR *hdr1 = &n1->index->ihdr;
	struct INDEX_HDR *hdr2;

@ -1994,7 +1987,7 @@ static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
			      const struct NTFS_DE *e, bool trim)
{
	int err;
	struct indx_node *n;
	struct indx_node *n = NULL;
	struct INDEX_HDR *hdr;
	CLST vbn = de_get_vbn(e);
	size_t i;
@ -430,6 +430,7 @@ end_enum:
	} else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
		   fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
		/* Records in $Extend are not a files or general directories. */
		inode->i_op = &ntfs_file_inode_operations;
	} else {
		err = -EINVAL;
		goto out;

@ -500,7 +501,7 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
		inode = ntfs_read_mft(inode, name, ref);
	else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
		/* Inode overlaps? */
		make_bad_inode(inode);
		_ntfs_bad_inode(inode);
	}

	return inode;

@ -1632,7 +1633,7 @@ out4:
	ni->mi.dirty = false;
	discard_new_inode(inode);
out3:
	ntfs_mark_rec_free(sbi, ino);
	ntfs_mark_rec_free(sbi, ino, false);

out2:
	__putname(new_de);

@ -1655,7 +1656,6 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	struct NTFS_DE *de;
	struct ATTR_FILE_NAME *de_name;

	/* Allocate PATH_MAX bytes. */
	de = __getname();

@ -1670,15 +1670,6 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
	if (err)
		goto out;

	de_name = (struct ATTR_FILE_NAME *)(de + 1);
	/* Fill duplicate info. */
	de_name->dup.cr_time = de_name->dup.m_time = de_name->dup.c_time =
		de_name->dup.a_time = kernel2nt(&inode->i_ctime);
	de_name->dup.alloc_size = de_name->dup.data_size =
		cpu_to_le64(inode->i_size);
	de_name->dup.fa = ni->std_fa;
	de_name->dup.ea_size = de_name->dup.reparse = 0;

	err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
out:
	__putname(de);

@ -1731,9 +1722,7 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
		if (inode->i_nlink)
			mark_inode_dirty(inode);
	} else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
		make_bad_inode(inode);
		ntfs_inode_err(inode, "failed to undo unlink");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		_ntfs_bad_inode(inode);
	} else {
		if (ni_is_dirty(dir))
			mark_inode_dirty(dir);
@ -208,7 +208,7 @@ static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
}

/*
 * ntfs_rmdir - inode_operations::rm_dir
 * ntfs_rmdir - inode_operations::rmdir
 */
static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
{

@ -308,9 +308,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *dir,
	err = ni_rename(dir_ni, new_dir_ni, ni, de, new_de, &is_bad);
	if (is_bad) {
		/* Restore after failed rename failed too. */
		make_bad_inode(inode);
		ntfs_inode_err(inode, "failed to undo rename");
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		_ntfs_bad_inode(inode);
	} else if (!err) {
		inode->i_ctime = dir->i_ctime = dir->i_mtime =
			current_time(dir);
@ -220,6 +220,7 @@ struct ntfs_sb_info {

	u32 flags; // See NTFS_FLAGS_XXX.

	CLST zone_max; // Maximum MFT zone length in clusters
	CLST bad_clusters; // The count of marked bad clusters.

	u16 max_bytes_per_attr; // Maximum attribute size in record.

@ -408,8 +409,6 @@ enum REPARSE_SIGN {
};

/* Functions from attrib.c */
int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
		   struct runs_tree *run, const CLST *vcn);
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,

@ -440,6 +439,7 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid);
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);

/* Functions from attrlist.c */

@ -528,7 +528,7 @@ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
			  const __le16 *name, u8 name_len,
			  const struct runs_tree *run, CLST svcn, CLST len,
			  __le16 flags, struct ATTRIB **new_attr,
			  struct mft_inode **mi);
			  struct mft_inode **mi, struct ATTR_LIST_ENTRY **le);
int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
		       enum ATTR_TYPE type, const __le16 *name, u8 name_len,
		       struct ATTRIB **new_attr, struct mft_inode **mi,

@ -589,10 +589,12 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     enum ALLOCATE_OPT opt);
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi);
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno);
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft);
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to);
int ntfs_refresh_zone(struct ntfs_sb_info *sbi);
int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
void ntfs_bad_inode(struct inode *inode, const char *hint);
#define _ntfs_bad_inode(i) ntfs_bad_inode(i, __func__)
enum NTFS_DIRTY_FLAGS {
	NTFS_DIRTY_CLEAR = 0,
	NTFS_DIRTY_DIRTY = 1,

@ -738,7 +740,6 @@ static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
int mi_write(struct mft_inode *mi, int wait);
int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
		  __le16 flags, bool is_mft);
void mi_mark_free(struct mft_inode *mi);
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
			      const __le16 *name, u8 name_len, u32 asize,
			      u16 name_off);

@ -780,10 +781,10 @@ bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
void run_truncate(struct runs_tree *run, CLST vcn);
void run_truncate_head(struct runs_tree *run, CLST vcn);
void run_truncate_around(struct runs_tree *run, CLST vcn);
bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft);
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len);
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);

@ -802,6 +803,7 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
#define run_unpack_ex run_unpack
#endif
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
int run_clone(const struct runs_tree *run, struct runs_tree *new_run);

/* Globals from super.c */
void *ntfs_set_shared(void *ptr, u32 bytes);
@ -394,28 +394,6 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
	return err;
}

/*
 * mi_mark_free - Mark record as unused and marks it as free in bitmap.
 */
void mi_mark_free(struct mft_inode *mi)
{
	CLST rno = mi->rno;
	struct ntfs_sb_info *sbi = mi->sbi;

	if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
		ntfs_clear_mft_tail(sbi, rno, rno + 1);
		mi->dirty = false;
		return;
	}

	if (mi->mrec) {
		clear_rec_inuse(mi->mrec);
		mi->dirty = true;
		mi_write(mi, 0);
	}
	ntfs_mark_rec_free(sbi, rno);
}

/*
 * mi_insert_attr - Reserve space for new attribute.
 *

@ -445,12 +423,11 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
	attr = NULL;
	while ((attr = mi_enum_attr(mi, attr))) {
		diff = compare_attr(attr, type, name, name_len, upcase);
		if (diff > 0)
			break;

		if (diff < 0)
			continue;

		if (!is_attr_indexed(attr))
		if (!diff && !is_attr_indexed(attr))
			return NULL;
		break;
	}
108  fs/ntfs3/run.c

@ -31,7 +31,7 @@ struct ntfs_run {
 * Case of entry missing from list 'index' will be set to
 * point to insertion position for the entry question.
 */
bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
static bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;
@ -547,6 +547,48 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
	return true;
}

/* run_insert_range
 *
 * Helper for attr_insert_range(),
 * which is helper for fallocate(insert_range).
 */
bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index;
	struct ntfs_run *r, *e;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return false; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;

	if (vcn > r->vcn)
		r += 1;

	for (; r < e; r++)
		r->vcn += len;

	r = run->runs + index;

	if (vcn > r->vcn) {
		/* split fragment. */
		CLST len1 = vcn - r->vcn;
		CLST len2 = r->len - len1;
		CLST lcn2 = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len1);

		r->len = len1;

		if (!run_add_entry(run, vcn + len, lcn2, len2, false))
			return false;
	}

	if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
		return false;

	return true;
}

/*
 * run_get_entry - Return index-th mapped region.
 */
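run_insert_range() shifts every run at or after the insertion point up by len, splits the run that straddles vcn, and then maps the inserted range as sparse. Below is a user-space sketch of the shifting step only, using a simplified run record; the names are hypothetical stand-ins, not the kernel's struct ntfs_run.

#include <stdio.h>

typedef unsigned long long CLST;
#define SPARSE ((CLST)-1)

struct run { CLST vcn, lcn, len; };	/* simplified stand-in */

/* Shift the virtual offsets of all runs at or after 'pos' up by 'len',
 * as run_insert_range() does before it maps the inserted range as sparse.
 * A run that straddles 'pos' would additionally have to be split. */
static void shift_runs(struct run *r, size_t count, CLST pos, CLST len)
{
	for (size_t i = 0; i < count; i++)
		if (r[i].vcn >= pos)
			r[i].vcn += len;
}

int main(void)
{
	struct run rs[] = {
		{ 0, 1000, 8 },
		{ 8, 2000, 4 },
		{ 12, SPARSE, 16 },
	};

	shift_runs(rs, 3, 8, 6);	/* insert 6 clusters at vcn 8 */

	for (size_t i = 0; i < 3; i++) {
		if (rs[i].lcn == SPARSE)
			printf("vcn=%llu lcn=SPARSE len=%llu\n", rs[i].vcn, rs[i].len);
		else
			printf("vcn=%llu lcn=%llu len=%llu\n", rs[i].vcn, rs[i].lcn, rs[i].len);
	}
	return 0;
}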
@ -778,26 +820,36 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	const struct ntfs_run *r, *r_end;
	int packed_size = 0;
	size_t i;
	bool ok;
	s64 dlcn;
	int offset_size, size_size, tmp;

	next_vcn = vcn = svcn;

	*packed_vcns = 0;

	if (!len)
		goto out;

	ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
	/* Check all required entries [svcn, encv1) available. */
	if (!run_lookup(run, svcn, &i))
		return -ENOENT;

	if (!ok)
		goto error;
	r_end = run->runs + run->count;
	r = run->runs + i;

	if (next_vcn != vcn)
		goto error;
	for (next_vcn = r->vcn + r->len; next_vcn < evcn1;
	     next_vcn = r->vcn + r->len) {
		if (++r >= r_end || r->vcn != next_vcn)
			return -ENOENT;
	}

	/* Repeat cycle above and pack runs. Assume no errors. */
	r = run->runs + i;
	len = svcn - r->vcn;
	vcn = svcn;
	lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + len);
	len = r->len - len;

	for (;;) {
		next_vcn = vcn + len;

@ -846,12 +898,10 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
			goto out;

		ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
		if (!ok)
			goto error;

		if (next_vcn != vcn)
			goto error;
		r += 1;
		vcn = r->vcn;
		lcn = r->lcn;
		len = r->len;
	}

out:

@ -860,9 +910,6 @@ out:
	run_buf[0] = 0;

	return packed_size + 1;

error:
	return -EOPNOTSUPP;
}

/*
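The reworked run_pack() now validates up front that [svcn, evcn1) is mapped by consecutive runs and returns -ENOENT otherwise, so the packing loop that follows can assume success. A stand-alone sketch of that contiguity check over a simplified run array (illustrative only, not the kernel code):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long long CLST;

struct run { CLST vcn, len; };	/* simplified: only the fields the check needs */

/* Return 0 if [svcn, evcn1) is covered by consecutive runs with no gap,
 * mirroring the "check in advance" pass added to run_pack(); -ENOENT otherwise. */
static int check_covered(const struct run *r, size_t count, size_t i,
			 CLST svcn, CLST evcn1)
{
	CLST next_vcn;

	if (i >= count || svcn < r[i].vcn || svcn >= r[i].vcn + r[i].len)
		return -ENOENT;

	for (next_vcn = r[i].vcn + r[i].len; next_vcn < evcn1;
	     next_vcn = r[i].vcn + r[i].len) {
		if (++i >= count || r[i].vcn != next_vcn)
			return -ENOENT;
	}
	return 0;
}

int main(void)
{
	struct run rs[] = { { 0, 8 }, { 8, 4 }, { 16, 4 } };	/* gap at vcn 12..15 */

	printf("%d\n", check_covered(rs, 3, 0, 0, 12));	/* 0: fully covered  */
	printf("%d\n", check_covered(rs, 3, 0, 0, 20));	/* -ENOENT: hits gap */
	return 0;
}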
@ -1109,3 +1156,28 @@ int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
	*highest_vcn = vcn64 - 1;
	return 0;
}

/*
 * run_clone
 *
 * Make a copy of run
 */
int run_clone(const struct runs_tree *run, struct runs_tree *new_run)
{
	size_t bytes = run->count * sizeof(struct ntfs_run);

	if (bytes > new_run->allocated) {
		struct ntfs_run *new_ptr = kvmalloc(bytes, GFP_KERNEL);

		if (!new_ptr)
			return -ENOMEM;

		kvfree(new_run->runs);
		new_run->runs = new_ptr;
		new_run->allocated = bytes;
	}

	memcpy(new_run->runs, run->runs, bytes);
	new_run->count = run->count;
	return 0;
}
@ -30,6 +30,7 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/seq_file.h>

@ -390,7 +391,7 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
		return -EINVAL;
	}

	memcpy(sbi->options, new_opts, sizeof(*new_opts));
	swap(sbi->options, fc->fs_private);

	return 0;
}
@ -870,6 +871,13 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
	sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
#endif

	/*
	 * Compute the MFT zone at two steps.
	 * It would be nice if we are able to allocate 1/8 of
	 * total clusters for MFT but not more then 512 MB.
	 */
	sbi->zone_max = min_t(CLST, 0x20000000 >> sbi->cluster_bits, clusters >> 3);

	err = 0;

out:
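The new zone_max is the smaller of one eighth of the volume and 512 MB worth of clusters. A quick sketch of the same arithmetic, handy for checking concrete volume sizes; the helper name and sample sizes are made up for illustration:

#include <stdio.h>

typedef unsigned long long CLST;

/* Mirrors the zone_max computation: at most 1/8 of the volume, capped at 512 MB. */
static CLST zone_max(CLST clusters, unsigned cluster_bits)
{
	CLST cap = (CLST)0x20000000 >> cluster_bits;	/* 512 MB expressed in clusters */
	CLST eighth = clusters >> 3;

	return cap < eighth ? cap : eighth;
}

int main(void)
{
	/* 100 GiB volume with 4 KiB clusters: 26214400 clusters. */
	printf("%llu\n", zone_max(26214400ULL, 12));	/* 131072: the 512 MB cap wins   */
	/* 1 GiB volume with 4 KiB clusters: 262144 clusters. */
	printf("%llu\n", zone_max(262144ULL, 12));	/* 32768: 1/8 of the volume wins */
	return 0;
}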
@ -900,6 +908,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
	ref.high = 0;

	sbi->sb = sb;
	sbi->options = fc->fs_private;
	fc->fs_private = NULL;
	sb->s_flags |= SB_NODIRATIME;
	sb->s_magic = 0x7366746e; // "ntfs"
	sb->s_op = &ntfs_sops;

@ -1262,8 +1272,6 @@ load_root:
		goto put_inode_out;
	}

	fc->fs_private = NULL;

	return 0;

put_inode_out:

@ -1378,7 +1386,7 @@ static const struct fs_context_operations ntfs_context_ops = {
/*
 * ntfs_init_fs_context - Initialize spi and opts
 *
 * This will called when mount/remount. We will first initiliaze
 * This will called when mount/remount. We will first initialize
 * options so that if remount we can use just that.
 */
static int ntfs_init_fs_context(struct fs_context *fc)

@ -1416,7 +1424,6 @@ static int ntfs_init_fs_context(struct fs_context *fc)
	mutex_init(&sbi->compress.mtx_lzx);
#endif

	sbi->options = opts;
	fc->s_fs_info = sbi;
ok:
	fc->fs_private = opts;
@ -118,7 +118,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,

		run_init(&run);

		err = attr_load_runs(attr_ea, ni, &run, NULL);
		err = attr_load_runs_range(ni, ATTR_EA, NULL, 0, &run, 0, size);
		if (!err)
			err = ntfs_read_run_nb(sbi, &run, 0, ea_p, size, NULL);
		run_close(&run);

@ -444,6 +444,11 @@ update_ea:
		/* Delete xattr, ATTR_EA */
		ni_remove_attr_le(ni, attr, mi, le);
	} else if (attr->non_res) {
		err = attr_load_runs_range(ni, ATTR_EA, NULL, 0, &ea_run, 0,
					   size);
		if (err)
			goto out;

		err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size, 0);
		if (err)
			goto out;
@ -547,28 +552,23 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
{
	const char *name;
	size_t size, name_len;
	void *value = NULL;
	int err = 0;
	void *value;
	int err;
	int flags;
	umode_t mode;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	mode = inode->i_mode;
	switch (type) {
	case ACL_TYPE_ACCESS:
		/* Do not change i_mode if we are in init_acl */
		if (acl && !init_acl) {
			umode_t mode;

			err = posix_acl_update_mode(mnt_userns, inode, &mode,
						    &acl);
			if (err)
				goto out;

			if (inode->i_mode != mode) {
				inode->i_mode = mode;
				mark_inode_dirty(inode);
			}
			return err;
		}
		name = XATTR_NAME_POSIX_ACL_ACCESS;
		name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;

@ -604,8 +604,13 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
	err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
	if (err == -ENODATA && !size)
		err = 0; /* Removing non existed xattr. */
	if (!err)
	if (!err) {
		set_cached_acl(inode, type, acl);
		if (inode->i_mode != mode) {
			inode->i_mode = mode;
			mark_inode_dirty(inode);
		}
	}

out:
	kfree(value);
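The two hunks above compute the prospective mode early but only commit it to the in-core inode after ntfs_set_ea() has succeeded. The following generic sketch shows that "write the backing store first, then update the cached state" ordering with purely hypothetical types; it is not the kernel code, just the pattern.

#include <errno.h>
#include <stdio.h>

struct obj { unsigned mode; int dirty; };	/* hypothetical stand-in */

static int store_write(int fail) { return fail ? -EIO : 0; }	/* pretend backing-store write */

/* Compute the new mode first, write to the backing store, and only update
 * the cached mode (and mark the object dirty) once the write succeeded. */
static int set_mode(struct obj *o, unsigned new_mode, int fail_write)
{
	int err = store_write(fail_write);

	if (!err && o->mode != new_mode) {
		o->mode = new_mode;
		o->dirty = 1;
	}
	return err;
}

int main(void)
{
	struct obj o = { 0644, 0 };

	set_mode(&o, 0640, 1);	/* write fails: mode stays 0644 */
	printf("%o dirty=%d\n", o.mode, o.dirty);
	set_mode(&o, 0640, 0);	/* write succeeds: mode becomes 0640 */
	printf("%o dirty=%d\n", o.mode, o.dirty);
	return 0;
}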
@ -706,13 +711,13 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
		inode->i_default_acl = NULL;
	}

	if (!acl)
		inode->i_acl = NULL;
	else {
	if (acl) {
		if (!err)
			err = ntfs_set_acl_ex(mnt_userns, inode, acl,
					      ACL_TYPE_ACCESS, true);
		posix_acl_release(acl);
	} else {
		inode->i_acl = NULL;
	}

	return err;