Merge tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs

Pull jffs2, ubi and ubifs updates from Richard Weinberger:
 "JFFS2:
   - Fix memory corruption in error path
   - Spelling and coding style fixes

  UBI:
   - Switch to BLK_MQ_F_BLOCKING in ubiblock
   - Wire up parent device (for sysfs)
   - Multiple UAF bugfixes
   - Fix for an infinite loop in WL error path

  UBIFS:
   - Fix for multiple memory leaks in error paths
   - Fixes for wrong space accounting
   - Minor cleanups
   - Spelling and coding style fixes"

* tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs: (36 commits)
  ubi: block: Fix a possible use-after-free bug in ubiblock_create()
  ubifs: make kobj_type structures constant
  mtd: ubi: block: wire-up device parent
  mtd: ubi: wire-up parent MTD device
  ubi: use correct names in function kernel-doc comments
  ubi: block: set BLK_MQ_F_BLOCKING
  jffs2: Fix list_del corruption if compressors initialized failed
  jffs2: Use function instead of macro when initialize compressors
  jffs2: fix spelling mistake "neccecary"->"necessary"
  ubifs: Fix kernel-doc
  ubifs: Fix some kernel-doc comments
  UBI: Fastmap: Fix kernel-doc
  ubi: ubi_wl_put_peb: Fix infinite loop when wear-leveling work failed
  ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
  ubi: fastmap: Fix missed fm_anchor PEB in wear-leveling after disabling fastmap
  ubifs: ubifs_releasepage: Remove ubifs_assert(0) to valid this process
  ubifs: ubifs_writepage: Mark page dirty after writing inode failed
  ubifs: dirty_cow_znode: Fix memleak in error handling path
  ubifs: Re-statistic cleaned znode count if commit failed
  ubi: Fix permission display of the debugfs files
  ...
Merged by Linus Torvalds on 2023-03-01 09:06:51 -08:00 as commit e31b283a58; 24 changed files with 275 additions and 169 deletions.

@@ -35,7 +35,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
@@ -62,7 +61,6 @@ struct ubiblock_param {
};
struct ubiblock_pdu {
struct work_struct work;
struct ubi_sgl usgl;
};
@@ -82,8 +80,6 @@ struct ubiblock {
struct gendisk *gd;
struct request_queue *rq;
struct workqueue_struct *wq;
struct mutex dev_mutex;
struct list_head list;
struct blk_mq_tag_set tag_set;
@@ -181,20 +177,29 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
return NULL;
}
static int ubiblock_read(struct ubiblock_pdu *pdu)
static blk_status_t ubiblock_read(struct request *req)
{
int ret, leb, offset, bytes_left, to_read;
u64 pos;
struct request *req = blk_mq_rq_from_pdu(pdu);
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
struct ubiblock *dev = req->q->queuedata;
to_read = blk_rq_bytes(req);
pos = blk_rq_pos(req) << 9;
u64 pos = blk_rq_pos(req) << 9;
int to_read = blk_rq_bytes(req);
int bytes_left = to_read;
/* Get LEB:offset address to read from */
offset = do_div(pos, dev->leb_size);
leb = pos;
bytes_left = to_read;
int offset = do_div(pos, dev->leb_size);
int leb = pos;
struct req_iterator iter;
struct bio_vec bvec;
int ret;
blk_mq_start_request(req);
/*
* It is safe to ignore the return value of blk_rq_map_sg() because
* the number of sg entries is limited to UBI_MAX_SG_COUNT
* and ubi_read_sg() will check that limit.
*/
ubi_sgl_init(&pdu->usgl);
blk_rq_map_sg(req->q, req, pdu->usgl.sg);
while (bytes_left) {
/*
@@ -206,14 +211,17 @@ static int ubiblock_read(struct ubiblock_pdu *pdu)
ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
if (ret < 0)
return ret;
break;
bytes_left -= to_read;
to_read = bytes_left;
leb += 1;
offset = 0;
}
return 0;
rq_for_each_segment(bvec, req, iter)
flush_dcache_page(bvec.bv_page);
return errno_to_blk_status(ret);
}
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
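The sector-to-LEB translation in the rewritten ubiblock_read() is worth pausing on: do_div() leaves the quotient in pos and returns the remainder, so pos becomes the LEB number and the return value the intra-LEB offset. A standalone userspace sketch of the same arithmetic (the leb_size value is an assumed example, not a UBI constant):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pos = 1000ULL << 9;      /* request begins at 512-byte sector 1000 */
        const uint32_t leb_size = 126976; /* assumed LEB size: 128 KiB PEB minus two 2 KiB pages */

        /* Equivalent of the kernel's do_div(pos, leb_size): the quotient
         * stays in pos, the remainder is handed back. */
        uint32_t offset = (uint32_t)(pos % leb_size);
        uint64_t leb = pos / leb_size;

        printf("LEB %llu, offset %u\n", (unsigned long long)leb, offset);
        /* prints: LEB 4, offset 4096 */
        return 0;
}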
@@ -289,47 +297,15 @@ static const struct block_device_operations ubiblock_ops = {
.getgeo = ubiblock_getgeo,
};
static void ubiblock_do_work(struct work_struct *work)
{
int ret;
struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
struct request *req = blk_mq_rq_from_pdu(pdu);
struct req_iterator iter;
struct bio_vec bvec;
blk_mq_start_request(req);
/*
* It is safe to ignore the return value of blk_rq_map_sg() because
* the number of sg entries is limited to UBI_MAX_SG_COUNT
* and ubi_read_sg() will check that limit.
*/
blk_rq_map_sg(req->q, req, pdu->usgl.sg);
ret = ubiblock_read(pdu);
rq_for_each_segment(bvec, req, iter)
flush_dcache_page(bvec.bv_page);
blk_mq_end_request(req, errno_to_blk_status(ret));
}
static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *req = bd->rq;
struct ubiblock *dev = hctx->queue->queuedata;
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
switch (req_op(req)) {
switch (req_op(bd->rq)) {
case REQ_OP_READ:
ubi_sgl_init(&pdu->usgl);
queue_work(dev->wq, &pdu->work);
return BLK_STS_OK;
return ubiblock_read(bd->rq);
default:
return BLK_STS_IOERR;
}
}
static int ubiblock_init_request(struct blk_mq_tag_set *set,
@@ -339,8 +315,6 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set,
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
INIT_WORK(&pdu->work, ubiblock_do_work);
return 0;
}
@@ -354,9 +328,12 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
u64 size = vi->used_bytes >> 9;
if (vi->used_bytes % 512) {
pr_warn("UBI: block: volume size is not a multiple of 512, "
"last %llu bytes are ignored!\n",
vi->used_bytes - (size << 9));
if (vi->vol_type == UBI_DYNAMIC_VOLUME)
pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
vi->used_bytes - (size << 9));
else
pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
vi->used_bytes - (size << 9));
}
if ((sector_t)size != size)
@@ -401,7 +378,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
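Adding BLK_MQ_F_BLOCKING here is what legitimizes the rest of this file's diff: the flag tells the blk-mq core that this driver's ->queue_rq() may sleep, so requests are only dispatched from contexts where blocking is allowed and the synchronous ubi_read_sg() call no longer needs to be bounced through the per-volume workqueue that the surrounding hunks delete.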
@@ -439,32 +416,20 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->rq = gd->queue;
blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
/*
* Create one workqueue per volume (per registered block device).
* Remember workqueues are cheap, they're not threads.
*/
dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
if (!dev->wq) {
ret = -ENOMEM;
goto out_remove_minor;
}
list_add_tail(&dev->list, &ubiblock_devices);
/* Must be the last step: anyone can call file ops from now on */
ret = add_disk(dev->gd);
ret = device_add_disk(vi->dev, dev->gd, NULL);
if (ret)
goto out_destroy_wq;
goto out_remove_minor;
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
mutex_unlock(&devices_mutex);
return 0;
out_destroy_wq:
list_del(&dev->list);
destroy_workqueue(dev->wq);
out_remove_minor:
list_del(&dev->list);
idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
put_disk(dev->gd);
@@ -482,8 +447,6 @@ static void ubiblock_cleanup(struct ubiblock *dev)
{
/* Stop new requests to arrive */
del_gendisk(dev->gd);
/* Flush pending work */
destroy_workqueue(dev->wq);
/* Finally destroy the blk queue */
dev_info(disk_to_dev(dev->gd), "released");
put_disk(dev->gd);

@@ -35,7 +35,7 @@
#define MTD_PARAM_LEN_MAX 64
/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 4
#define MTD_PARAM_MAX_COUNT 5
/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768
@@ -53,12 +53,14 @@
* @ubi_num: UBI number
* @vid_hdr_offs: VID header offset
* @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
* @enable_fm: enable fastmap when value is non-zero
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
int ubi_num;
int vid_hdr_offs;
int max_beb_per1024;
int enable_fm;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -468,6 +470,7 @@ static int uif_init(struct ubi_device *ubi)
err = ubi_add_volume(ubi, ubi->volumes[i]);
if (err) {
ubi_err(ubi, "cannot add volume %d", i);
ubi->volumes[i] = NULL;
goto out_volumes;
}
}
@@ -663,6 +666,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
ubi->vid_hdr_alsize)) {
ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
return -EINVAL;
}
dbg_gen("min_io_size %d", ubi->min_io_size);
dbg_gen("max_write_size %d", ubi->max_write_size);
dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@@ -906,6 +915,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
ubi->dev.release = dev_release;
ubi->dev.class = &ubi_class;
ubi->dev.groups = ubi_dev_groups;
ubi->dev.parent = &mtd->dev;
ubi->mtd = mtd;
ubi->ubi_num = ubi_num;
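Setting dev.parent before device_register() is the whole "wire-up parent" fix: the UBI device is now registered as a child of its backing MTD device, so it appears under that device's sysfs directory instead of as a free-standing node (presumably under /sys/devices/virtual/ before the change); the ubiblock hunk earlier does the analogous thing for the disk by passing vi->dev to device_add_disk().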
@@ -1248,7 +1258,7 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, p->ubi_num,
p->vid_hdr_offs, p->max_beb_per1024,
false);
p->enable_fm == 0 ? true : false);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
pr_err("UBI error: cannot attach mtd%d\n",
@@ -1427,7 +1437,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
int err = kstrtoint(token, 10, &p->max_beb_per1024);
if (err) {
pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
token);
return -EINVAL;
}
@@ -1438,13 +1448,25 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
int err = kstrtoint(token, 10, &p->ubi_num);
if (err) {
pr_err("UBI error: bad value for ubi_num parameter: %s",
pr_err("UBI error: bad value for ubi_num parameter: %s\n",
token);
return -EINVAL;
}
} else
p->ubi_num = UBI_DEV_NUM_AUTO;
token = tokens[4];
if (token) {
int err = kstrtoint(token, 10, &p->enable_fm);
if (err) {
pr_err("UBI error: bad value for enable_fm parameter: %s\n",
token);
return -EINVAL;
}
} else
p->enable_fm = 0;
mtd_devs += 1;
return 0;
}
@@ -1457,11 +1479,13 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
"Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
__stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
"Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
"Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
"\n"
"Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
"Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
"Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
"example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n"
"\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);

@@ -504,6 +504,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
unsigned long ubi_num = ubi->ubi_num;
struct ubi_debug_info *d = &ubi->dbg;
umode_t mode = S_IRUSR | S_IWUSR;
int n;
if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -518,41 +519,41 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode,
d->dfs_dir, (void *)ubi_num,
&dfs_fops);
d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode,
d->dfs_dir, (void *)ubi_num,
&dfs_fops);
d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
S_IWUSR, d->dfs_dir,
mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
S_IWUSR, d->dfs_dir,
mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
S_IWUSR, d->dfs_dir,
mode, d->dfs_dir,
(void *)ubi_num,
&dfs_fops);
d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
S_IWUSR, d->dfs_dir,
mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
S_IWUSR, d->dfs_dir,
mode, d->dfs_dir,
(void *)ubi_num, &dfs_fops);
debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,

@@ -61,7 +61,7 @@ struct ubi_eba_table {
};
/**
* next_sqnum - get next sequence number.
* ubi_next_sqnum - get next sequence number.
* @ubi: UBI device description object
*
* This function returns next sequence number to use, which is just the current

@@ -146,13 +146,15 @@ void ubi_refill_pools(struct ubi_device *ubi)
if (ubi->fm_anchor) {
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
ubi->fm_anchor = NULL;
}
/*
* All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
if (!ubi->fm_disabled)
/*
* All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) {
enough = 0;

@@ -93,7 +93,7 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
/**
* new_fm_vhdr - allocate a new volume header for fastmap usage.
* new_fm_vbuf() - allocate a new volume header for fastmap usage.
* @ubi: UBI device description object
* @vol_id: the VID of the new header
*

@@ -79,6 +79,7 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
vi->name_len = vol->name_len;
vi->name = vol->name;
vi->cdev = vol->cdev.dev;
vi->dev = &vol->dev;
}
/**

@@ -10,7 +10,7 @@
#include "ubi.h"
/**
* calc_data_len - calculate how much real data is stored in a buffer.
* ubi_calc_data_len - calculate how much real data is stored in a buffer.
* @ubi: UBI device description object
* @buf: a buffer with the contents of the physical eraseblock
* @length: the buffer length

@@ -464,7 +464,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
goto out_acc;
goto out_free;
}
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
@@ -512,8 +512,10 @@ out_acc:
ubi->avail_pebs += pebs;
spin_unlock(&ubi->volumes_lock);
}
return err;
out_free:
kfree(new_eba_tbl);
ubi_eba_destroy_table(new_eba_tbl);
return err;
}
@@ -580,6 +582,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
if (err) {
ubi_err(ubi, "cannot add character device for volume %d, error %d",
vol_id, err);
vol_release(&vol->dev);
return err;
}
@@ -590,15 +593,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err)
goto out_cdev;
if (err) {
cdev_del(&vol->cdev);
put_device(&vol->dev);
return err;
}
self_check_volumes(ubi);
return err;
out_cdev:
cdev_del(&vol->cdev);
return err;
}
/**

@@ -165,7 +165,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
}
/**
* wl_tree_destroy - destroy a wear-leveling entry.
* wl_entry_destroy - destroy a wear-leveling entry.
* @ubi: UBI device description object
* @e: the wear-leveling entry to add
*
@@ -890,8 +890,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
if (e2)
if (e2) {
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e2);
spin_unlock(&ubi->wl_lock);
}
goto out_ro;
}
@@ -973,11 +976,11 @@ out_error:
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
wl_entry_destroy(ubi, e1);
wl_entry_destroy(ubi, e2);
spin_unlock(&ubi->wl_lock);
ubi_free_vid_buf(vidb);
wl_entry_destroy(ubi, e1);
wl_entry_destroy(ubi, e2);
out_ro:
ubi_ro_mode(ubi);
@@ -1130,14 +1133,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
if (err1) {
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
spin_unlock(&ubi->wl_lock);
err = err1;
goto out_ro;
}
return err;
}
spin_lock(&ubi->wl_lock);
wl_entry_destroy(ubi, e);
spin_unlock(&ubi->wl_lock);
if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1253,6 +1260,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
retry:
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
if (!e) {
/*
* This wl entry has been removed for some errors by other
* process (eg. wear leveling worker), corresponding process
* (except __erase_worker, which cannot concurrent with
* ubi_wl_put_peb) will set ubi ro_mode at the same time,
* just ignore this wl entry.
*/
spin_unlock(&ubi->wl_lock);
up_read(&ubi->fm_protect);
return 0;
}
if (e == ubi->move_from) {
/*
* User is putting the physical eraseblock which was selected to

@@ -364,20 +364,25 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
int __init jffs2_compressors_init(void)
{
int ret = 0;
/* Registering compressors */
#ifdef CONFIG_JFFS2_ZLIB
jffs2_zlib_init();
#endif
#ifdef CONFIG_JFFS2_RTIME
jffs2_rtime_init();
#endif
#ifdef CONFIG_JFFS2_RUBIN
jffs2_rubinmips_init();
jffs2_dynrubin_init();
#endif
#ifdef CONFIG_JFFS2_LZO
jffs2_lzo_init();
#endif
ret = jffs2_zlib_init();
if (ret)
goto exit;
ret = jffs2_rtime_init();
if (ret)
goto exit_zlib;
ret = jffs2_rubinmips_init();
if (ret)
goto exit_rtime;
ret = jffs2_dynrubin_init();
if (ret)
goto exit_runinmips;
ret = jffs2_lzo_init();
if (ret)
goto exit_dynrubin;
/* Setting default compression mode */
#ifdef CONFIG_JFFS2_CMODE_NONE
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
@@ -396,23 +401,26 @@ int __init jffs2_compressors_init(void)
#endif
#endif
return 0;
exit_dynrubin:
jffs2_dynrubin_exit();
exit_runinmips:
jffs2_rubinmips_exit();
exit_rtime:
jffs2_rtime_exit();
exit_zlib:
jffs2_zlib_exit();
exit:
return ret;
}
int jffs2_compressors_exit(void)
{
/* Unregistering compressors */
#ifdef CONFIG_JFFS2_LZO
jffs2_lzo_exit();
#endif
#ifdef CONFIG_JFFS2_RUBIN
jffs2_dynrubin_exit();
jffs2_rubinmips_exit();
#endif
#ifdef CONFIG_JFFS2_RTIME
jffs2_rtime_exit();
#endif
#ifdef CONFIG_JFFS2_ZLIB
jffs2_zlib_exit();
#endif
return 0;
}
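The ladder that replaces the old #ifdef soup is the standard register/unwind idiom: each step that can fail jumps to a label that unregisters everything done so far, in reverse order. A compilable toy version of the shape (all names invented for illustration; these are not the jffs2 functions):

#include <stdio.h>

static int register_a(void) { puts("a registered"); return 0; }
static void unregister_a(void) { puts("a unregistered"); }
static int register_b(void) { puts("b failed"); return -1; } /* simulated failure */

static int register_all(void)
{
        int ret;

        ret = register_a();
        if (ret)
                goto exit;
        ret = register_b();
        if (ret)
                goto exit_a; /* unwind in reverse registration order */
        return 0;

exit_a:
        unregister_a();
exit:
        return ret;
}

int main(void)
{
        return register_all() ? 1 : 0;
}

The compr.h hunk in the next file completes the picture: compressors that are compiled out get static inline stubs returning 0, which is what lets jffs2_compressors_init() call every init function unconditionally.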

@@ -88,18 +88,32 @@ int jffs2_rubinmips_init(void);
void jffs2_rubinmips_exit(void);
int jffs2_dynrubin_init(void);
void jffs2_dynrubin_exit(void);
#else
static inline int jffs2_rubinmips_init(void) { return 0; }
static inline void jffs2_rubinmips_exit(void) {}
static inline int jffs2_dynrubin_init(void) { return 0; }
static inline void jffs2_dynrubin_exit(void) {}
#endif
#ifdef CONFIG_JFFS2_RTIME
int jffs2_rtime_init(void);
void jffs2_rtime_exit(void);
extern int jffs2_rtime_init(void);
extern void jffs2_rtime_exit(void);
#else
static inline int jffs2_rtime_init(void) { return 0; }
static inline void jffs2_rtime_exit(void) {}
#endif
#ifdef CONFIG_JFFS2_ZLIB
int jffs2_zlib_init(void);
void jffs2_zlib_exit(void);
extern int jffs2_zlib_init(void);
extern void jffs2_zlib_exit(void);
#else
static inline int jffs2_zlib_init(void) { return 0; }
static inline void jffs2_zlib_exit(void) {}
#endif
#ifdef CONFIG_JFFS2_LZO
int jffs2_lzo_init(void);
void jffs2_lzo_exit(void);
extern int jffs2_lzo_init(void);
extern void jffs2_lzo_exit(void);
#else
static inline int jffs2_lzo_init(void) { return 0; }
static inline void jffs2_lzo_exit(void) {}
#endif
#endif /* __JFFS2_COMPR_H__ */

@@ -137,19 +137,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
pgoff_t index = pos >> PAGE_SHIFT;
uint32_t pageofs = index << PAGE_SHIFT;
int ret = 0;
jffs2_dbg(1, "%s()\n", __func__);
if (pageofs > inode->i_size) {
/* Make new hole frag from old EOF to new page */
if (pos > inode->i_size) {
/* Make new hole frag from old EOF to new position */
struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
uint32_t alloc_len;
jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
(unsigned int)inode->i_size, pageofs);
jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n",
(unsigned int)inode->i_size, (uint32_t)pos);
ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
@@ -169,10 +168,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
ri.mode = cpu_to_jemode(inode->i_mode);
ri.uid = cpu_to_je16(i_uid_read(inode));
ri.gid = cpu_to_je16(i_gid_read(inode));
ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
ri.isize = cpu_to_je32((uint32_t)pos);
ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
ri.offset = cpu_to_je32(inode->i_size);
ri.dsize = cpu_to_je32(pageofs - inode->i_size);
ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size);
ri.csize = cpu_to_je32(0);
ri.compr = JFFS2_COMPR_ZERO;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
@@ -202,7 +201,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
goto out_err;
}
jffs2_complete_reservation(c);
inode->i_size = pageofs;
inode->i_size = pos;
mutex_unlock(&f->sem);
}
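The effect of switching the hole calculation from pageofs to pos is easiest to see with numbers: the old code only extended i_size to the first byte of the page being written, while the new code extends it exactly to the write position. A userspace sketch of both computations (4 KiB pages; the offsets are made-up examples):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint64_t i_size = 1000;             /* current EOF */
        uint64_t pos = 5000;                /* write starts here, inside page 1 */
        uint64_t index = pos >> PAGE_SHIFT;
        uint32_t pageofs = (uint32_t)(index << PAGE_SHIFT);

        /* old behaviour: hole frag [i_size, pageofs) */
        printf("old: dsize=%u, i_size becomes %u\n",
               (uint32_t)(pageofs - i_size), pageofs);      /* 3096, 4096 */
        /* new behaviour: hole frag [i_size, pos) */
        printf("new: dsize=%u, i_size becomes %u\n",
               (uint32_t)(pos - i_size), (uint32_t)pos);    /* 4000, 5000 */
        return 0;
}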

@@ -403,7 +403,7 @@ int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc)
/* We stop if it was running, then restart if it needs to.
This also catches the case where it was stopped and this
is just a remount to restart it.
Flush the writebuffer, if neccecary, else we loose it */
Flush the writebuffer, if necessary, else we loose it */
if (!sb_rdonly(sb)) {
jffs2_stop_garbage_collect_thread(c);
mutex_lock(&c->alloc_sem);

@@ -209,11 +209,10 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs)
subtract_lebs += 1;
/*
* The GC journal head LEB is not really accessible. And since
* different write types go to different heads, we may count only on
* one head's space.
* Since different write types go to different heads, we should
* reserve one leb for each head.
*/
subtract_lebs += c->jhead_cnt - 1;
subtract_lebs += c->jhead_cnt;
/* We also reserve one LEB for deletions, which bypass budgeting */
subtract_lebs += 1;
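Concretely, UBIFS runs with three journal heads (GC, base and data), so the old expression subtracted jhead_cnt - 1 = 2 LEBs while the new one reserves all 3: the reported available space shrinks by one LEB, in exchange for accounting that still holds when every head sits on a partially used LEB.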
@@ -400,7 +399,7 @@ static int calc_dd_growth(const struct ubifs_info *c,
dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
if (req->dirtied_ino)
dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
dd_growth += c->bi.inode_budget * req->dirtied_ino;
if (req->mod_dent)
dd_growth += c->bi.dent_budget;
dd_growth += req->dirtied_ino_d;
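The replaced shift was quietly exponential: inode_budget << (dirtied_ino - 1) equals inode_budget * 2^(dirtied_ino - 1), which coincides with the intended value for one or two dirtied inodes but reserves 4 budgets for 3 inodes and 8 for 4. The multiplication reserves exactly one inode budget per dirtied inode.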

@@ -1151,7 +1151,6 @@ static int ubifs_symlink(struct mnt_idmap *idmap, struct inode *dir,
int err, sz_change, len = strlen(symname);
struct fscrypt_str disk_link;
struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
.new_ino_d = ALIGN(len, 8),
.dirtied_ino = 1 };
struct fscrypt_name nm;
@@ -1167,6 +1166,7 @@ static int ubifs_symlink(struct mnt_idmap *idmap, struct inode *dir,
* Budget request settings: new inode, new direntry and changing parent
* directory inode.
*/
req.new_ino_d = ALIGN(disk_link.len - 1, 8);
err = ubifs_budget_space(c, &req);
if (err)
return err;
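Presumably the reason new_ino_d moves below fscrypt_prepare_symlink() (which fills in disk_link): for an encrypted symlink the on-disk target is the ciphertext, whose length can differ from strlen(symname), so budgeting from the raw name length could reserve the wrong amount; disk_link.len counts the trailing NUL, hence the -1 inside the ALIGN().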
@@ -1324,6 +1324,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlink) {
ubifs_assert(c, inode_is_locked(new_inode));
/* Budget for old inode's data when its nlink > 1. */
req.dirtied_ino_d = ALIGN(ubifs_inode(new_inode)->data_len, 8);
err = ubifs_purge_xattrs(new_inode);
if (err)
return err;
@@ -1566,6 +1568,15 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
ubifs_assert(c, fst_inode && snd_inode);
/*
* Budget request settings: changing two direntries, changing the two
* parent directory inodes.
*/
dbg_gen("dent '%pd' ino %lu in dir ino %lu exchange dent '%pd' ino %lu in dir ino %lu",
old_dentry, fst_inode->i_ino, old_dir->i_ino,
new_dentry, snd_inode->i_ino, new_dir->i_ino);
err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm);
if (err)
return err;
@@ -1576,6 +1587,10 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
err = ubifs_budget_space(c, &req);
if (err)
goto out;
lock_4_inodes(old_dir, new_dir, NULL, NULL);
time = current_time(old_dir);
@@ -1601,6 +1616,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
unlock_4_inodes(old_dir, new_dir, NULL, NULL);
ubifs_release_budget(c, &req);
out:
fscrypt_free_filename(&fst_nm);
fscrypt_free_filename(&snd_nm);
return err;

@@ -1032,7 +1032,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
if (page->index >= synced_i_size >> PAGE_SHIFT) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
goto out_unlock;
goto out_redirty;
/*
* The inode has been written, but the write-buffer has
* not been synchronized, so in case of an unclean
@@ -1060,11 +1060,17 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
if (i_size > synced_i_size) {
err = inode->i_sb->s_op->write_inode(inode, NULL);
if (err)
goto out_unlock;
goto out_redirty;
}
return do_writepage(page, len);
out_redirty:
/*
* redirty_page_for_writepage() won't call ubifs_dirty_inode() because
* it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
* there is no need to do space budget for dirty inode.
*/
redirty_page_for_writepage(wbc, page);
out_unlock:
unlock_page(page);
return err;
@@ -1466,14 +1472,23 @@ static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
struct inode *inode = folio->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
/*
* An attempt to release a dirty page without budgeting for it - should
* not happen.
*/
if (folio_test_writeback(folio))
return false;
/*
* Page is private but not dirty, weird? There is one condition
* making it happened. ubifs_writepage skipped the page because
* page index beyonds isize (for example. truncated by other
* process named A), then the page is invalidated by fadvise64
* syscall before being truncated by process A.
*/
ubifs_assert(c, folio_test_private(folio));
ubifs_assert(c, 0);
if (folio_test_checked(folio))
release_new_page_budget(c);
else
release_existing_page_budget(c);
atomic_long_dec(&c->dirty_pg_cnt);
folio_detach_private(folio);
folio_clear_checked(folio);
return true;

@@ -488,7 +488,7 @@ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
}
/**
* wbuf_timer_callback - write-buffer timer callback function.
* wbuf_timer_callback_nolock - write-buffer timer callback function.
* @timer: timer data (write-buffer descriptor)
*
* This function is called when the write-buffer timer expires.
@@ -505,7 +505,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
}
/**
* new_wbuf_timer - start new write-buffer timer.
* new_wbuf_timer_nolock - start new write-buffer timer.
* @c: UBIFS file-system description object
* @wbuf: write-buffer descriptor
*/
@@ -531,7 +531,7 @@ static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
}
/**
* cancel_wbuf_timer - cancel write-buffer timer.
* cancel_wbuf_timer_nolock - cancel write-buffer timer.
* @wbuf: write-buffer descriptor
*/
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)

@@ -1201,9 +1201,13 @@ out_free:
* ubifs_jnl_rename - rename a directory entry.
* @c: UBIFS file-system description object
* @old_dir: parent inode of directory entry to rename
* @old_dentry: directory entry to rename
* @old_inode: directory entry's inode to rename
* @old_nm: name of the old directory entry to rename
* @new_dir: parent inode of directory entry to rename
* @new_dentry: new directory entry (or directory entry to replace)
* @new_inode: new directory entry's inode (or directory entry's inode to
* replace)
* @new_nm: new name of the new directory entry
* @whiteout: whiteout inode
* @sync: non-zero if the write-buffer has to be synchronized
*
* This function implements the re-name operation which may involve writing up

@@ -833,7 +833,7 @@ static int alloc_wbufs(struct ubifs_info *c)
INIT_LIST_HEAD(&c->jheads[i].buds_list);
err = ubifs_wbuf_init(c, &c->jheads[i].wbuf);
if (err)
return err;
goto out_wbuf;
c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
c->jheads[i].wbuf.jhead = i;
@@ -841,7 +841,7 @@ static int alloc_wbufs(struct ubifs_info *c)
c->jheads[i].log_hash = ubifs_hash_get_desc(c);
if (IS_ERR(c->jheads[i].log_hash)) {
err = PTR_ERR(c->jheads[i].log_hash);
goto out;
goto out_log_hash;
}
}
@@ -854,9 +854,18 @@ static int alloc_wbufs(struct ubifs_info *c)
return 0;
out:
while (i--)
out_log_hash:
kfree(c->jheads[i].wbuf.buf);
kfree(c->jheads[i].wbuf.inodes);
out_wbuf:
while (i--) {
kfree(c->jheads[i].wbuf.buf);
kfree(c->jheads[i].wbuf.inodes);
kfree(c->jheads[i].log_hash);
}
kfree(c->jheads);
c->jheads = NULL;
return err;
}

@@ -74,13 +74,13 @@ static const struct sysfs_ops ubifs_attr_ops = {
.show = ubifs_attr_show,
};
static struct kobj_type ubifs_sb_ktype = {
static const struct kobj_type ubifs_sb_ktype = {
.default_groups = ubifs_groups,
.sysfs_ops = &ubifs_attr_ops,
.release = ubifs_sb_release,
};
static struct kobj_type ubifs_ktype = {
static const struct kobj_type ubifs_ktype = {
.sysfs_ops = &ubifs_attr_ops,
};
@@ -144,6 +144,8 @@ int __init ubifs_sysfs_init(void)
kobject_set_name(&ubifs_kset.kobj, "ubifs");
ubifs_kset.kobj.parent = fs_kobj;
ret = kset_register(&ubifs_kset);
if (ret)
kset_put(&ubifs_kset);
return ret;
}
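The added kset_put() matters because kset_register() does not release the caller's reference when it fails; the name string attached by kobject_set_name() is only freed when the embedded kobject's last reference is dropped, so returning without the put leaked it.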

@@ -267,11 +267,18 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
if (zbr->len) {
err = insert_old_idx(c, zbr->lnum, zbr->offs);
if (unlikely(err))
return ERR_PTR(err);
/*
* Obsolete znodes will be freed by tnc_destroy_cnext()
* or free_obsolete_znodes(), copied up znodes should
* be added back to tnc and freed by
* ubifs_destroy_tnc_subtree().
*/
goto out;
err = add_idx_dirt(c, zbr->lnum, zbr->len);
} else
err = 0;
out:
zbr->znode = zn;
zbr->lnum = 0;
zbr->offs = 0;
@@ -3053,6 +3060,21 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
cnext = cnext->cnext;
if (ubifs_zn_obsolete(znode))
kfree(znode);
else if (!ubifs_zn_cow(znode)) {
/*
* Don't forget to update clean znode count after
* committing failed, because ubifs will check this
* count while closing tnc. Non-obsolete znode could
* be re-dirtied during committing process, so dirty
* flag is untrustable. The flag 'COW_ZNODE' is set
* for each dirty znode before committing, and it is
* cleared as long as the znode become clean, so we
* can statistic clean znode count according to this
* flag.
*/
atomic_long_inc(&c->clean_zn_cnt);
atomic_long_inc(&ubifs_clean_zn_cnt);
}
} while (cnext && cnext != c->cnext);
}

@@ -1623,8 +1623,13 @@ static inline int ubifs_check_hmac(const struct ubifs_info *c,
return crypto_memneq(expected, got, c->hmac_desc_len);
}
#ifdef CONFIG_UBIFS_FS_AUTHENTICATION
void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
const u8 *hash, int lnum, int offs);
#else
static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
const u8 *hash, int lnum, int offs) {};
#endif
int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf,
const u8 *expected);

@@ -110,6 +110,7 @@ struct ubi_volume_info {
int name_len;
const char *name;
dev_t cdev;
struct device *dev;
};
/**