UBI: fix checkpatch.pl errors and warnings
Just out of curiosity I ran checkpatch.pl over the whole of UBI and discovered quite a few stylistic issues. Fix them.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
parent 4d88de4beb
commit 9c9ec14770
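For reference, a report like the one the commit message mentions can be produced with checkpatch.pl's --file mode, which checks source files rather than patches; the exact invocation is assumed here, not taken from the commit:

    # assumed example; run from the top of a kernel source tree
    ./scripts/checkpatch.pl --file drivers/mtd/ubi/*.[ch]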
@@ -51,14 +51,13 @@
  * @name: MTD device name or number string
  * @vid_hdr_offs: VID header offset
  */
-struct mtd_dev_param
-{
+struct mtd_dev_param {
         char name[MTD_PARAM_LEN_MAX];
         int vid_hdr_offs;
 };
 
 /* Numbers of elements set in the @mtd_dev_param array */
-static int mtd_devs = 0;
+static int mtd_devs;
 
 /* MTD devices specification parameters */
 static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
@@ -781,7 +780,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
                         if (!ubi_devices[ubi_num])
                                 break;
                 if (ubi_num == UBI_MAX_DEVICES) {
-                        dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
+                        dbg_err("only %d UBI devices may be created",
+                                UBI_MAX_DEVICES);
                         return -ENFILE;
                 }
         } else {
@@ -39,9 +39,9 @@
 #include <linux/stat.h>
 #include <linux/ioctl.h>
 #include <linux/capability.h>
+#include <linux/uaccess.h>
 #include <linux/smp_lock.h>
 #include <mtd/ubi-user.h>
-#include <asm/uaccess.h>
 #include <asm/div64.h>
 #include "ubi.h"
 
@@ -352,7 +352,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
 }
 
 #else
-#define vol_cdev_direct_write(file, buf, count, offp) -EPERM
+#define vol_cdev_direct_write(file, buf, count, offp) (-EPERM)
 #endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */
 
 static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
@@ -189,9 +189,7 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
         le->users += 1;
         spin_unlock(&ubi->ltree_lock);
 
-        if (le_free)
-                kfree(le_free);
-
+        kfree(le_free);
         return le;
 }
 
@@ -503,9 +501,8 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
         struct ubi_vid_hdr *vid_hdr;
 
         vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-        if (!vid_hdr) {
+        if (!vid_hdr)
                 return -ENOMEM;
-        }
 
         mutex_lock(&ubi->buf_mutex);
 
@@ -249,8 +249,8 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
         if (err)
                 goto out_err;
 
-        instr->state = MTD_ERASE_DONE;
-        mtd_erase_callback(instr);
+        instr->state = MTD_ERASE_DONE;
+        mtd_erase_callback(instr);
         return 0;
 
 out_err:
@@ -167,8 +167,8 @@ retry:
         }
 
         if (read != len && retries++ < UBI_IO_RETRIES) {
-                dbg_io("error %d while reading %d bytes from PEB %d:%d, "
-                       "read only %zd bytes, retry",
+                dbg_io("error %d while reading %d bytes from PEB %d:%d,"
+                       " read only %zd bytes, retry",
                        err, len, pnum, offset, read);
                 yield();
                 goto retry;
@@ -705,8 +705,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
 
         if (hdr_crc != crc) {
                 if (verbose) {
-                        ubi_warn("bad EC header CRC at PEB %d, calculated %#08x,"
-                                 " read %#08x", pnum, crc, hdr_crc);
+                        ubi_warn("bad EC header CRC at PEB %d, calculated "
+                                 "%#08x, read %#08x", pnum, crc, hdr_crc);
                         ubi_dbg_dump_ec_hdr(ec_hdr);
                 }
                 return UBI_IO_BAD_EC_HDR;
@@ -248,7 +248,8 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
         unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
 
         if (seb->sqnum == 0 && sqnum2 == 0) {
-                long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
+                long long abs;
+                long long v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
 
                 /*
                  * UBI constantly increases the logical eraseblock version
@@ -752,7 +753,8 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
  * This function returns a zero if the physical eraseblock was successfully
  * handled and a negative error code in case of failure.
  */
-static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
+static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
+                      int pnum)
 {
         long long uninitialized_var(ec);
         int err, bitflips = 0, vol_id, ec_corr = 0;
@@ -1301,8 +1303,7 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
                 if (err < 0) {
                         kfree(buf);
                         return err;
-                }
-                else if (err)
+                } else if (err)
                         buf[pnum] = 1;
         }
 
@@ -473,7 +473,8 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
                              const void __user *buf, int count);
 
 /* misc.c */
-int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+                      int length);
 int ubi_check_volume(struct ubi_device *ubi, int vol_id);
 void ubi_calculate_reserved(struct ubi_device *ubi);
 
@@ -39,7 +39,7 @@
  */
 
 #include <linux/err.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/div64.h>
 #include "ubi.h"
 
@@ -246,7 +246,8 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
                         return 0;
                 }
 
-                err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
+                err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
+                                        UBI_UNKNOWN);
         } else {
                 /*
                  * When writing static volume, and this is the last logical
@@ -418,7 +419,8 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
         if (vol->upd_received == vol->upd_bytes) {
                 int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
 
-                memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
+                memset(vol->upd_buf + vol->upd_bytes, 0xFF,
+                       len - vol->upd_bytes);
                 len = ubi_calc_data_len(ubi, vol->upd_buf, len);
                 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
                                                 vol->upd_buf, len, UBI_UNKNOWN);
@@ -253,7 +253,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
                 goto out_unlock;
         }
 
-        /* Calculate how many eraseblocks are requested */
+        /* Calculate how many eraseblocks are requested */
         vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
         bytes = req->bytes;
         if (do_div(bytes, vol->usable_leb_size))
@@ -858,7 +858,7 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
 
         if (alignment != vol->alignment || data_pad != vol->data_pad ||
             upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
-            name_len!= vol->name_len || strncmp(name, vol->name, name_len)) {
+            name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
                 ubi_err("volume info is different");
                 goto fail;
         }
@@ -461,7 +461,8 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
         if (!leb_corrupted[0]) {
                 /* LEB 0 is OK */
                 if (leb[1])
-                        leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size);
+                        leb_corrupted[1] = memcmp(leb[0], leb[1],
+                                                  ubi->vtbl_size);
                 if (leb_corrupted[1]) {
                         ubi_warn("volume table copy #2 is corrupted");
                         err = create_vtbl(ubi, si, 1, leb[0]);
@@ -859,11 +860,10 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
 
 out_free:
         vfree(ubi->vtbl);
-        for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
-                if (ubi->volumes[i]) {
-                        kfree(ubi->volumes[i]);
-                        ubi->volumes[i] = NULL;
-                }
+        for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+                kfree(ubi->volumes[i]);
+                ubi->volumes[i] = NULL;
+        }
         return err;
 }
 
@@ -475,52 +475,47 @@ retry:
         }
 
         switch (dtype) {
-                case UBI_LONGTERM:
-                        /*
-                         * For long term data we pick a physical eraseblock
-                         * with high erase counter. But the highest erase
-                         * counter we can pick is bounded by the the lowest
-                         * erase counter plus %WL_FREE_MAX_DIFF.
-                         */
-                        e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-                        protect = LT_PROTECTION;
-                        break;
-                case UBI_UNKNOWN:
-                        /*
-                         * For unknown data we pick a physical eraseblock with
-                         * medium erase counter. But we by no means can pick a
-                         * physical eraseblock with erase counter greater or
-                         * equivalent than the lowest erase counter plus
-                         * %WL_FREE_MAX_DIFF.
-                         */
-                        first = rb_entry(rb_first(&ubi->free),
-                                         struct ubi_wl_entry, rb);
-                        last = rb_entry(rb_last(&ubi->free),
-                                        struct ubi_wl_entry, rb);
+        case UBI_LONGTERM:
+                /*
+                 * For long term data we pick a physical eraseblock with high
+                 * erase counter. But the highest erase counter we can pick is
+                 * bounded by the the lowest erase counter plus
+                 * %WL_FREE_MAX_DIFF.
+                 */
+                e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
+                protect = LT_PROTECTION;
+                break;
+        case UBI_UNKNOWN:
+                /*
+                 * For unknown data we pick a physical eraseblock with medium
+                 * erase counter. But we by no means can pick a physical
+                 * eraseblock with erase counter greater or equivalent than the
+                 * lowest erase counter plus %WL_FREE_MAX_DIFF.
+                 */
+                first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
+                last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb);
 
-                        if (last->ec - first->ec < WL_FREE_MAX_DIFF)
-                                e = rb_entry(ubi->free.rb_node,
-                                             struct ubi_wl_entry, rb);
-                        else {
-                                medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
-                                e = find_wl_entry(&ubi->free, medium_ec);
-                        }
-                        protect = U_PROTECTION;
-                        break;
-                case UBI_SHORTTERM:
-                        /*
-                         * For short term data we pick a physical eraseblock
-                         * with the lowest erase counter as we expect it will
-                         * be erased soon.
-                         */
-                        e = rb_entry(rb_first(&ubi->free),
-                                     struct ubi_wl_entry, rb);
-                        protect = ST_PROTECTION;
-                        break;
-                default:
-                        protect = 0;
-                        e = NULL;
-                        BUG();
+                if (last->ec - first->ec < WL_FREE_MAX_DIFF)
+                        e = rb_entry(ubi->free.rb_node,
+                                     struct ubi_wl_entry, rb);
+                else {
+                        medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
+                        e = find_wl_entry(&ubi->free, medium_ec);
+                }
+                protect = U_PROTECTION;
+                break;
+        case UBI_SHORTTERM:
+                /*
+                 * For short term data we pick a physical eraseblock with the
+                 * lowest erase counter as we expect it will be erased soon.
+                 */
+                e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
+                protect = ST_PROTECTION;
+                break;
+        default:
+                protect = 0;
+                e = NULL;
+                BUG();
         }
 
         /*
@@ -584,7 +579,8 @@ found:
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
+static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+                      int torture)
 {
         int err;
         struct ubi_ec_hdr *ec_hdr;
@@ -1060,8 +1056,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                 spin_unlock(&ubi->wl_lock);
 
                 /*
-                 * One more erase operation has happened, take care about protected
-                 * physical eraseblocks.
+                 * One more erase operation has happened, take care about
+                 * protected physical eraseblocks.
                  */
                 check_protection_over(ubi);
 
@@ -188,14 +188,14 @@ enum {
  * it will be 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages.
  *
  * But in rare cases, if this optimizes things, the VID header may be placed to
- * a different offset. For example, the boot-loader might do things faster if the
- * VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. As
- * the boot-loader would not normally need to read EC headers (unless it needs
- * UBI in RW mode), it might be faster to calculate ECC. This is weird example,
- * but it real-life example. So, in this example, @vid_hdr_offer would be
- * 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes
- * aligned, which is OK, as UBI is clever enough to realize this is 4th sub-page
- * of the first page and add needed padding.
+ * a different offset. For example, the boot-loader might do things faster if
+ * the VID header sits at the end of the first 2KiB NAND page with 4 sub-pages.
+ * As the boot-loader would not normally need to read EC headers (unless it
+ * needs UBI in RW mode), it might be faster to calculate ECC. This is weird
+ * example, but it real-life example. So, in this example, @vid_hdr_offer would
+ * be 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes
+ * aligned, which is OK, as UBI is clever enough to realize this is 4th
+ * sub-page of the first page and add needed padding.
  */
 struct ubi_attach_req {
         int32_t ubi_num;