2019-05-23 09:14:39 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2010-08-08 19:58:20 +00:00
|
|
|
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
|
|
|
|
2005-11-06 23:14:42 +00:00
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/fs.h>
|
2007-08-10 20:01:06 +00:00
|
|
|
#include <linux/mm.h>
|
2006-10-11 11:52:47 +00:00
|
|
|
#include <linux/err.h>
|
2005-11-06 23:14:42 +00:00
|
|
|
#include <linux/init.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
2005-11-06 23:14:42 +00:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/sched.h>
|
2010-06-02 12:28:52 +00:00
|
|
|
#include <linux/mutex.h>
|
2009-02-12 10:40:00 +00:00
|
|
|
#include <linux/backing-dev.h>
|
2009-04-09 05:53:13 +00:00
|
|
|
#include <linux/compat.h>
|
2010-05-17 13:55:47 +00:00
|
|
|
#include <linux/mount.h>
|
2010-09-17 10:31:42 +00:00
|
|
|
#include <linux/blkpg.h>
|
2012-03-23 22:01:50 +00:00
|
|
|
#include <linux/magic.h>
|
2013-10-13 21:05:23 +00:00
|
|
|
#include <linux/major.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/mtd/mtd.h>
|
2010-09-17 10:31:42 +00:00
|
|
|
#include <linux/mtd/partitions.h>
|
2010-06-15 07:30:15 +00:00
|
|
|
#include <linux/mtd/map.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2016-12-24 19:46:01 +00:00
|
|
|
#include <linux/uaccess.h>
|
2005-06-30 00:23:27 +00:00
|
|
|
|
mtd: merge mtdchar module with mtdcore
The MTD subsystem has historically tried to be as configurable as possible. The
side-effect of this is that its configuration menu is rather large, and we are
gradually shrinking it. For example, we recently merged partitions support with
the mtdcore.
This patch does the next step - it merges the mtdchar module to mtdcore. And in
this case this is not only about eliminating too fine-grained separation and
simplifying the configuration menu. This is also about eliminating seemingly
useless kernel module.
Indeed, mtdchar is a module that allows user-space making use of MTD devices
via /dev/mtd* character devices. If users do not enable it, they simply cannot
use MTD devices at all. They cannot read or write the flash contents. Is it a
sane and useful setup? I believe not. And everyone just enables mtdchar.
Having mtdchar separate is also a little bit harmful. People sometimes miss the
fact that they need to enable an additional configuration option to have
user-space MTD interfaces, and then they wonder why on earth the kernel does
not allow using the flash? They spend time asking around.
Thus, let's just get rid of this module and make it part of mtd core.
Note, mtdchar had additional configuration option to enable OTP interfaces,
which are present on some flashes. I removed that option as well - it saves a
really tiny amount space.
[dwmw2: Strictly speaking, you can mount file systems on MTD devices just
fine without the mtdchar (or mtdblock) devices; you just can't do
other manipulations directly on the underlying device. But still I
agree that it makes sense to make this unconditional. And Yay! we
get to kill off an instance of checking CONFIG_foo_MODULE, which is
an abomination that should never happen.]
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
2013-03-14 11:27:40 +00:00
|
|
|
#include "mtdcore.h"
|
|
|
|
|
2005-02-08 19:12:53 +00:00
|
|
|
/*
|
2006-05-29 22:37:34 +00:00
|
|
|
* Data structure to hold the pointer to the mtd device as well
|
2011-07-20 16:53:42 +00:00
|
|
|
* as mode information of various use cases.
|
2005-02-08 19:12:53 +00:00
|
|
|
*/
|
2006-05-29 22:37:34 +00:00
|
|
|
struct mtd_file_info {
|
|
|
|
struct mtd_info *mtd;
|
|
|
|
enum mtd_file_modes mode;
|
|
|
|
};
|
2005-02-08 17:45:55 +00:00
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
/*
 * llseek handler for /dev/mtd*: delegate to the VFS helper, which
 * clamps the resulting position to [0, mtd->size].
 */
static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	return fixed_size_llseek(file, offset, orig, mtd->size);
}
|
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
static int mtdchar_open(struct inode *inode, struct file *file)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int minor = iminor(inode);
|
|
|
|
int devnum = minor >> 1;
|
2008-05-15 16:10:37 +00:00
|
|
|
int ret = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct mtd_info *mtd;
|
2006-05-29 22:37:34 +00:00
|
|
|
struct mtd_file_info *mfi;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-19 17:06:09 +00:00
|
|
|
pr_debug("MTD_open\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* You can't open the RO devices RW */
|
2008-09-02 19:28:45 +00:00
|
|
|
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EACCES;
|
|
|
|
|
|
|
|
mtd = get_mtd_device(NULL, devnum);
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2021-02-17 21:18:44 +00:00
|
|
|
if (IS_ERR(mtd))
|
|
|
|
return PTR_ERR(mtd);
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2009-02-12 10:40:00 +00:00
|
|
|
if (mtd->type == MTD_ABSENT) {
|
2008-05-15 16:10:37 +00:00
|
|
|
ret = -ENODEV;
|
2012-04-09 05:36:28 +00:00
|
|
|
goto out1;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* You can't open it RW if it's not a writeable device */
|
2008-09-02 19:28:45 +00:00
|
|
|
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
|
2008-05-15 16:10:37 +00:00
|
|
|
ret = -EACCES;
|
2015-01-14 09:42:32 +00:00
|
|
|
goto out1;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2006-05-29 22:37:34 +00:00
|
|
|
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
|
|
|
|
if (!mfi) {
|
2008-05-15 16:10:37 +00:00
|
|
|
ret = -ENOMEM;
|
2015-01-14 09:42:32 +00:00
|
|
|
goto out1;
|
2006-05-29 22:37:34 +00:00
|
|
|
}
|
|
|
|
mfi->mtd = mtd;
|
|
|
|
file->private_data = mfi;
|
2012-04-09 05:36:28 +00:00
|
|
|
return 0;
|
2006-05-29 22:37:34 +00:00
|
|
|
|
2012-04-09 05:36:28 +00:00
|
|
|
out1:
|
|
|
|
put_mtd_device(mtd);
|
2008-05-15 16:10:37 +00:00
|
|
|
return ret;
|
2011-12-23 15:27:46 +00:00
|
|
|
} /* mtdchar_open */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*====================================================================*/
|
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
static int mtdchar_close(struct inode *inode, struct file *file)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-05-29 22:37:34 +00:00
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
|
|
|
struct mtd_info *mtd = mfi->mtd;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-19 17:06:09 +00:00
|
|
|
pr_debug("MTD_close\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-06-26 22:56:40 +00:00
|
|
|
/* Only sync if opened RW */
|
2011-12-30 14:35:35 +00:00
|
|
|
if ((file->f_mode & FMODE_WRITE))
|
2011-12-23 17:03:12 +00:00
|
|
|
mtd_sync(mtd);
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
put_mtd_device(mtd);
|
2006-05-29 22:37:34 +00:00
|
|
|
file->private_data = NULL;
|
|
|
|
kfree(mfi);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
return 0;
|
2011-12-23 15:27:46 +00:00
|
|
|
} /* mtdchar_close */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-04-08 15:51:33 +00:00
|
|
|
/* Back in June 2001, dwmw2 wrote:
|
|
|
|
*
|
|
|
|
* FIXME: This _really_ needs to die. In 2.5, we should lock the
|
|
|
|
* userspace buffer down and use it directly with readv/writev.
|
|
|
|
*
|
|
|
|
* The implementation below, using mtd_kmalloc_up_to, mitigates
|
|
|
|
* allocation failures when the system is under low-memory situations
|
|
|
|
* or if memory is highly fragmented at the cost of reducing the
|
|
|
|
* performance of the requested transfer due to a smaller buffer size.
|
|
|
|
*
|
|
|
|
* A more complex but more memory-efficient implementation based on
|
|
|
|
* get_user_pages and iovecs to cover extents of those pages is a
|
|
|
|
* longer-term goal, as intimated by dwmw2 above. However, for the
|
|
|
|
* write case, this requires yet more complex head and tail transfer
|
|
|
|
* handling when those head and tail offsets and sizes are such that
|
|
|
|
* alignment requirements are not met in the NAND subdriver.
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
/*
 * Read @count bytes from the MTD device at position *@ppos into the
 * user buffer @buf.  Data is staged through a kernel bounce buffer
 * (sized by mtd_kmalloc_up_to(), so a single call may require several
 * chunked MTD reads).  The data source depends on mfi->mode: factory
 * OTP, user OTP, raw (no ECC correction), or the normal data area.
 * Returns the number of bytes transferred, or a negative error code.
 */
static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	/* Clamp the request to the device size (short read past EOF). */
	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	/* May return a buffer smaller than @size under memory pressure. */
	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						&retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						&retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			/* No forward progress: stop instead of spinning. */
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
/*
 * Write @count bytes from the user buffer @buf to the MTD device at
 * position *@ppos.  Like mtdchar_read(), the transfer is staged through
 * a bounce buffer and chunked.  The destination depends on mfi->mode:
 * factory OTP is read-only (-EROFS), user OTP, raw (no ECC), or the
 * normal data area.  Returns the number of bytes written, or a negative
 * error code if nothing could be written.
 */
static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	pr_debug("MTD_write\n");

	/* Writes starting at or past the end of the device cannot fit. */
	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* May return a buffer smaller than @size under memory pressure. */
	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			/* Factory OTP area is never writable. */
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						&retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*======================================================================
|
|
|
|
|
|
|
|
IOCTL calls for getting device parameters.
|
|
|
|
|
|
|
|
======================================================================*/
|
|
|
|
|
2006-05-29 22:37:34 +00:00
|
|
|
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
|
|
|
|
{
|
|
|
|
struct mtd_info *mtd = mfi->mtd;
|
2011-12-29 08:06:32 +00:00
|
|
|
size_t retlen;
|
|
|
|
|
2006-05-29 22:37:34 +00:00
|
|
|
switch (mode) {
|
|
|
|
case MTD_OTP_FACTORY:
|
2013-03-04 16:35:24 +00:00
|
|
|
if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
|
|
|
|
-EOPNOTSUPP)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2011-12-29 08:06:32 +00:00
|
|
|
mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
|
2006-05-29 22:37:34 +00:00
|
|
|
break;
|
|
|
|
case MTD_OTP_USER:
|
2013-03-04 16:35:24 +00:00
|
|
|
if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
|
|
|
|
-EOPNOTSUPP)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2011-12-29 08:06:32 +00:00
|
|
|
mfi->mode = MTD_FILE_MODE_OTP_USER;
|
2006-05-29 22:37:34 +00:00
|
|
|
break;
|
|
|
|
case MTD_OTP_OFF:
|
2013-03-04 16:35:24 +00:00
|
|
|
mfi->mode = MTD_FILE_MODE_NORMAL;
|
2006-05-29 22:37:34 +00:00
|
|
|
break;
|
2013-03-04 16:35:24 +00:00
|
|
|
default:
|
|
|
|
return -EINVAL;
|
2006-05-29 22:37:34 +00:00
|
|
|
}
|
2013-03-04 16:35:24 +00:00
|
|
|
|
|
|
|
return 0;
|
2006-05-29 22:37:34 +00:00
|
|
|
}
|
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
/*
 * MEMWRITEOOB/MEMWRITEOOB64 backend: write @length bytes of OOB data
 * from the user buffer @ptr, starting at byte offset @start within the
 * device.  The in-page part of @start selects the OOB offset; the page
 * part selects the page.  The number of OOB bytes actually written is
 * copied back to @retp.  Returns 0 or a negative error code.
 */
static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	/* Cap the request; one OOB write never legitimately needs more. */
	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	/* writesize is a power of 2, so this extracts the in-page offset
	 * (oobsize may not be a power of 2 and must not be used here). */
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	/* Round @start down to the start of its page. */
	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	/* The user-visible count is 32-bit; flag anything that won't fit. */
	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}
|
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
/*
 * MEMREADOOB/MEMREADOOB64 backend: read @length bytes of OOB data into
 * the user buffer @ptr, starting at byte offset @start within the
 * device.  The in-page part of @start selects the OOB offset; the page
 * part selects the page.  The number of OOB bytes actually read is
 * stored to @retp.  Returns 0 (including on correctable/uncorrectable
 * ECC events, see below) or a negative error code.
 */
static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	/* Cap the request; one OOB read never legitimately needs more. */
	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	/* writesize is a power of 2, so this extracts the in-page offset
	 * (oobsize may not be a power of 2 and must not be used here). */
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	/* Round @start down to the start of its page. */
	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}
|
|
|
|
|
2010-08-25 01:12:00 +00:00
|
|
|
/*
|
2016-02-04 09:16:18 +00:00
|
|
|
* Copies (and truncates, if necessary) OOB layout information to the
|
|
|
|
* deprecated layout struct, nand_ecclayout_user. This is necessary only to
|
|
|
|
* support the deprecated API ioctl ECCGETLAYOUT while allowing all new
|
|
|
|
* functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
|
|
|
|
* can describe any kind of OOB layout with almost zero overhead from a
|
|
|
|
* memory usage point of view).
|
2010-08-25 01:12:00 +00:00
|
|
|
*/
|
2016-02-03 19:10:30 +00:00
|
|
|
/*
 * Fill the deprecated nand_ecclayout_user structure @to from the
 * mtd_ooblayout_ops of @mtd, truncating to the fixed-size arrays of the
 * legacy ABI (used by the ECCGETLAYOUT ioctl).  Returns 0 on success or
 * a negative error code; -ERANGE from the layout callbacks marks the
 * end of the section list and is not an error.
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	/* Flatten the ECC regions into the legacy eccpos[] byte list,
	 * truncated at MTD_MAX_ECCPOS_ENTRIES. */
	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	/* Copy the free OOB regions, truncated at MTD_MAX_OOBFREE_ENTRIES. */
	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Fill the even older nand_oobinfo structure @to (MEMGETOOBSEL ABI)
 * from the mtd_ooblayout_ops of @mtd.  Unlike shrink_ecclayout(), an
 * ECC region that would overflow the fixed eccpos[] array is an error
 * (-EINVAL) rather than being truncated.  -ERANGE from the layout
 * callbacks marks the end of the section list and is not an error.
 */
static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	/* Flatten the ECC regions into the legacy eccpos[] byte list. */
	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		/* This ABI cannot represent a layout that large. */
		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	/* nand_oobinfo holds at most 8 (offset, length) free-region pairs. */
	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}
|
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
|
2015-09-21 20:26:59 +00:00
|
|
|
struct blkpg_ioctl_arg *arg)
|
2010-09-17 10:31:42 +00:00
|
|
|
{
|
|
|
|
struct blkpg_partition p;
|
|
|
|
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
2015-09-21 20:26:59 +00:00
|
|
|
if (copy_from_user(&p, arg->data, sizeof(p)))
|
2010-09-17 10:31:42 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
2015-09-21 20:26:59 +00:00
|
|
|
switch (arg->op) {
|
2010-09-17 10:31:42 +00:00
|
|
|
case BLKPG_ADD_PARTITION:
|
|
|
|
|
2010-11-23 12:17:17 +00:00
|
|
|
/* Only master mtd device must be used to add partitions */
|
|
|
|
if (mtd_is_partition(mtd))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-07-22 02:08:13 +00:00
|
|
|
/* Sanitize user input */
|
|
|
|
p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
|
|
|
|
|
2010-09-17 10:31:42 +00:00
|
|
|
return mtd_add_partition(mtd, p.devname, p.start, p.length);
|
|
|
|
|
|
|
|
case BLKPG_DEL_PARTITION:
|
|
|
|
|
|
|
|
if (p.pno < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return mtd_del_partition(mtd, p.pno);
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-30 11:31:49 +00:00
|
|
|
/*
 * For a data+OOB write beginning at @start, shrink ops->ooblen so that
 * it never exceeds the OOB space available in the pages actually
 * covered by ops->len.  OOB-only (len == 0) and data-only (ooblen == 0)
 * requests are left untouched.
 */
static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
			      struct mtd_oob_ops *ops)
{
	uint32_t first_page, last_page, npages;
	u32 oob_per_page;

	if (ops->len == 0 || ops->ooblen == 0)
		return;

	first_page = mtd_div_by_ws(start, mtd);
	last_page = mtd_div_by_ws(start + ops->len - 1, mtd);
	npages = last_page - first_page + 1;
	oob_per_page = mtd_oobavail(mtd, ops);

	ops->ooblen = min_t(size_t, ops->ooblen, npages * oob_per_page);
}
|
|
|
|
|
2023-04-17 20:56:50 +00:00
|
|
|
/*
 * MEMWRITE ioctl backend: write data and/or OOB described by a
 * struct mtd_write_req.  Data and OOB are copied from user space in
 * chunks of at most one eraseblock each and written together via
 * mtd_write_oob(), so a single request may need several iterations.
 * Returns 0 on success or a negative error code.
 */
static noinline_for_stack int
mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	const void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	/* A NULL user pointer means "no data" / "no OOB" respectively. */
	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	/* The lengths are u64 in the ABI; clamp them to 32 bits. */
	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (req.start + req.len > mtd->size)
		return -EINVAL;

	/* Bounce buffers hold at most one eraseblock per iteration. */
	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf)
			return -ENOMEM;
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			kvfree(datbuf);
			return -ENOMEM;
		}
	}

	/* Loop until all data is written; an OOB-only request (no data
	 * pointer) instead loops until all OOB is written. */
	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized writes so that
		 * the write ends on an eraseblock boundary. This is necessary
		 * for adjust_oob_length() to properly handle non-page-aligned
		 * writes.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		/*
		 * For writes which are not OOB-only, adjust the amount of OOB
		 * data written according to the number of data pages written.
		 * This is necessary to prevent OOB data from being skipped
		 * over in data+OOB writes requiring multiple mtd_write_oob()
		 * calls to be completed.
		 */
		adjust_oob_length(mtd, req.start, &ops);

		if (copy_from_user(datbuf, usr_data, ops.len) ||
		    copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
			ret = -EFAULT;
			break;
		}

		ret = mtd_write_oob(mtd, req.start, &ops);
		if (ret)
			break;

		/* Advance by what was actually written this iteration. */
		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}
|
|
|
|
|
2023-04-17 20:56:50 +00:00
|
|
|
static noinline_for_stack int
|
|
|
|
mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
|
mtdchar: add MEMREAD ioctl
User-space applications making use of MTD devices via /dev/mtd*
character devices currently have limited capabilities for reading data:
- only deprecated methods of accessing OOB layout information exist,
- there is no way to explicitly specify MTD operation mode to use; it
is auto-selected based on the MTD file mode (MTD_FILE_MODE_*) set
for the character device; in particular, this prevents using
MTD_OPS_AUTO_OOB for reads,
- all existing user-space interfaces which cause mtd_read() or
mtd_read_oob() to be called (via mtdchar_read() and
mtdchar_read_oob(), respectively) return success even when those
functions return -EUCLEAN or -EBADMSG; this renders user-space
applications using these interfaces unaware of any corrected
bitflips or uncorrectable ECC errors detected during reads.
Note that the existing MEMWRITE ioctl allows the MTD operation mode to
be explicitly set, allowing user-space applications to write page data
and OOB data without requiring them to know anything about the OOB
layout of the MTD device they are writing to (MTD_OPS_AUTO_OOB). Also,
the MEMWRITE ioctl does not mangle the return value of mtd_write_oob().
Add a new ioctl, MEMREAD, which addresses the above issues. It is
intended to be a read-side counterpart of the existing MEMWRITE ioctl.
Similarly to the latter, the read operation is performed in a loop which
processes at most mtd->erasesize bytes in each iteration. This is done
to prevent unbounded memory allocations caused by calling kmalloc() with
the 'size' argument taken directly from the struct mtd_read_req provided
by user space. However, the new ioctl is implemented so that the values
it returns match those that would have been returned if just a single
mtd_read_oob() call was issued to handle the entire read operation in
one go.
Note that while just returning -EUCLEAN or -EBADMSG to user space would
already be a valid and useful indication of the ECC algorithm detecting
errors during a read operation, that signal would not be granular enough
to cover all use cases. For example, knowing the maximum number of
bitflips detected in a single ECC step during a read operation performed
on a given page may be useful when dealing with an MTD partition whose
ECC layout varies across pages (e.g. a partition consisting of a
bootloader area using a "custom" ECC layout followed by data pages using
a "standard" ECC layout). To address that, include ECC statistics in
the structure returned to user space by the new MEMREAD ioctl.
Link: https://www.infradead.org/pipermail/linux-mtd/2016-April/067085.html
Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Michał Kępień <kernel@kempniu.pl>
Acked-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20220629125737.14418-5-kernel@kempniu.pl
2022-06-29 12:57:37 +00:00
|
|
|
{
|
|
|
|
struct mtd_info *master = mtd_get_master(mtd);
|
|
|
|
struct mtd_read_req req;
|
|
|
|
void __user *usr_data, *usr_oob;
|
|
|
|
uint8_t *datbuf = NULL, *oobbuf = NULL;
|
|
|
|
size_t datbuf_len, oobbuf_len;
|
|
|
|
size_t orig_len, orig_ooblen;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (copy_from_user(&req, argp, sizeof(req)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
orig_len = req.len;
|
|
|
|
orig_ooblen = req.ooblen;
|
|
|
|
|
|
|
|
usr_data = (void __user *)(uintptr_t)req.usr_data;
|
|
|
|
usr_oob = (void __user *)(uintptr_t)req.usr_oob;
|
|
|
|
|
|
|
|
if (!master->_read_oob)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (!usr_data)
|
|
|
|
req.len = 0;
|
|
|
|
|
|
|
|
if (!usr_oob)
|
|
|
|
req.ooblen = 0;
|
|
|
|
|
|
|
|
req.ecc_stats.uncorrectable_errors = 0;
|
|
|
|
req.ecc_stats.corrected_bitflips = 0;
|
|
|
|
req.ecc_stats.max_bitflips = 0;
|
|
|
|
|
|
|
|
req.len &= 0xffffffff;
|
|
|
|
req.ooblen &= 0xffffffff;
|
|
|
|
|
|
|
|
if (req.start + req.len > mtd->size) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
|
|
|
|
if (datbuf_len > 0) {
|
|
|
|
datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
|
|
|
|
if (!datbuf) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
|
|
|
|
if (oobbuf_len > 0) {
|
|
|
|
oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
|
|
|
|
if (!oobbuf) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
|
|
|
|
struct mtd_req_stats stats;
|
|
|
|
struct mtd_oob_ops ops = {
|
|
|
|
.mode = req.mode,
|
|
|
|
.len = min_t(size_t, req.len, datbuf_len),
|
|
|
|
.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
|
|
|
|
.datbuf = datbuf,
|
|
|
|
.oobbuf = oobbuf,
|
|
|
|
.stats = &stats,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Shorten non-page-aligned, eraseblock-sized reads so that the
|
|
|
|
* read ends on an eraseblock boundary. This is necessary in
|
|
|
|
* order to prevent OOB data for some pages from being
|
|
|
|
* duplicated in the output of non-page-aligned reads requiring
|
|
|
|
* multiple mtd_read_oob() calls to be completed.
|
|
|
|
*/
|
|
|
|
if (ops.len == mtd->erasesize)
|
|
|
|
ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
|
|
|
|
|
|
|
|
ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);
|
|
|
|
|
|
|
|
req.ecc_stats.uncorrectable_errors +=
|
|
|
|
stats.uncorrectable_errors;
|
|
|
|
req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
|
|
|
|
req.ecc_stats.max_bitflips =
|
|
|
|
max(req.ecc_stats.max_bitflips, stats.max_bitflips);
|
|
|
|
|
|
|
|
if (ret && !mtd_is_bitflip_or_eccerr(ret))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
|
|
|
|
copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
|
|
|
|
ret = -EFAULT;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
req.start += ops.retlen;
|
|
|
|
req.len -= ops.retlen;
|
|
|
|
usr_data += ops.retlen;
|
|
|
|
|
|
|
|
req.ooblen -= ops.oobretlen;
|
|
|
|
usr_oob += ops.oobretlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* As multiple iterations of the above loop (and therefore multiple
|
|
|
|
* mtd_read_oob() calls) may be necessary to complete the read request,
|
|
|
|
* adjust the final return code to ensure it accounts for all detected
|
|
|
|
* ECC errors.
|
|
|
|
*/
|
|
|
|
if (!ret || mtd_is_bitflip(ret)) {
|
|
|
|
if (req.ecc_stats.uncorrectable_errors > 0)
|
|
|
|
ret = -EBADMSG;
|
|
|
|
else if (req.ecc_stats.corrected_bitflips > 0)
|
|
|
|
ret = -EUCLEAN;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
req.len = orig_len - req.len;
|
|
|
|
req.ooblen = orig_ooblen - req.ooblen;
|
|
|
|
|
|
|
|
if (copy_to_user(argp, &req, sizeof(req)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
|
|
|
|
kvfree(datbuf);
|
|
|
|
kvfree(oobbuf);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
/*
 * Main ioctl dispatcher for /dev/mtd* character devices.
 *
 * A first switch classifies the command as "safe" (read-only) or
 * "dangerous" (modifies the device) and rejects dangerous commands on
 * file descriptors opened without write permission; a second switch
 * then performs the actual operation.
 */
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode to require "dangerous" commands to have write
	 * permissions.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMREAD:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMSETBADBLOCK:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
	case OTPLOCK:
	case OTPERASE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below field is obsolete */
		info.padding	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			/* The two variants differ only in the user struct width. */
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMREAD:
	{
		ret = mtdchar_read_ioctl(mtd,
		      (struct mtd_read_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		/* Reset to the normal mode before (re)selecting an OTP mode. */
		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	case OTPERASE:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (cmd == OTPLOCK)
			ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		else
			ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch(arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}
	}

	return ret;
} /* memory_ioctl */
|
|
|
|
|
2011-12-23 15:27:46 +00:00
|
|
|
/*
 * unlocked_ioctl entry point: serialize all ioctls on a per-master
 * mutex, then delegate the real work to mtdchar_ioctl().
 */
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	mutex_lock(&master->master.chrdev_lock);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}
|
|
|
|
|
2009-04-09 05:53:13 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
|
|
|
|
struct mtd_oob_buf32 {
|
|
|
|
u_int32_t start;
|
|
|
|
u_int32_t length;
|
|
|
|
compat_caddr_t ptr; /* unsigned char* */
|
|
|
|
};
|
|
|
|
|
|
|
|
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
|
|
|
|
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
|
|
|
|
|
/*
 * 32-bit compat entry point for the mtd character device.
 *
 * Translates the ioctl layouts that differ between 32-bit and 64-bit
 * userland (MEMWRITEOOB32, MEMREADOOB32 and BLKPG) and forwards every
 * other command to mtdchar_ioctl() with the pointer argument widened
 * via compat_ptr().  Runs under the master device's chrdev_lock, the
 * same mutex the native ioctl path takes.
 */
static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&master->master.chrdev_lock);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* Writing OOB data modifies the flash: reject read-only opens. */
		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			/* Number of bytes written is reported back into the
			 * user's buf->length field. */
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		/* Widen the 32-bit data pointer into the native struct;
		 * memset() first so padding is not passed down uninitialized. */
		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		/* All remaining commands share the native layout. */
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#endif /* CONFIG_COMPAT */
|
/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (the MMU case can't/doesn't
 *   copy private mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	/* The caller may not request a fixed placement. */
	if (addr != 0)
		return (unsigned long) -EINVAL;

	/* Reject lengths or page offsets beyond the device size... */
	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	/* ...and mappings whose tail would run past the end of the device. */
	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	/* Drivers with no direct-mapping support return -EOPNOTSUPP;
	 * report that to the mmap machinery as -ENODEV. */
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}
2015-01-14 09:42:32 +00:00
|
|
|
|
|
|
|
static unsigned mtdchar_mmap_capabilities(struct file *file)
|
|
|
|
{
|
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
|
|
|
|
|
|
|
return mtd_mmap_capabilities(mfi->mtd);
|
|
|
|
}
|
2009-02-12 10:40:00 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* set up a mapping for shared memory segments
|
|
|
|
*/
|
2011-12-23 15:27:46 +00:00
|
|
|
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
|
2009-02-12 10:40:00 +00:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_MMU
|
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
|
|
|
struct mtd_info *mtd = mfi->mtd;
|
2010-06-15 07:30:15 +00:00
|
|
|
struct map_info *map = mtd->priv;
|
|
|
|
|
2012-10-09 14:08:10 +00:00
|
|
|
/* This is broken because it assumes the MTD device is map-based
|
|
|
|
and that mtd->priv is a valid struct map_info. It should be
|
|
|
|
replaced with something that uses the mtd_get_unmapped_area()
|
|
|
|
operation properly. */
|
|
|
|
if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
|
2010-06-15 07:30:15 +00:00
|
|
|
#ifdef pgprot_noncached
|
2013-04-19 16:53:07 +00:00
|
|
|
if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
|
2010-06-15 07:30:15 +00:00
|
|
|
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
|
|
|
#endif
|
2013-04-19 16:53:07 +00:00
|
|
|
return vm_iomap_memory(vma, map->phys, map->size);
|
2010-06-15 07:30:15 +00:00
|
|
|
}
|
2013-10-28 16:08:15 +00:00
|
|
|
return -ENODEV;
|
2009-02-12 10:40:00 +00:00
|
|
|
#else
|
2013-10-28 16:08:15 +00:00
|
|
|
return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
|
2009-02-12 10:40:00 +00:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
/* File operations backing the /dev/mtd* character devices. */
static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	/* 32-bit userland on a 64-bit kernel goes through the translator. */
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	/* NOMMU-only hooks: placement and capability queries for mmap. */
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};
|
|
|
|
mtd: merge mtdchar module with mtdcore
The MTD subsystem has historically tried to be as configurable as possible. The
side-effect of this is that its configuration menu is rather large, and we are
gradually shrinking it. For example, we recently merged partitions support with
the mtdcore.
This patch does the next step - it merges the mtdchar module to mtdcore. And in
this case this is not only about eliminating too fine-grained separation and
simplifying the configuration menu. This is also about eliminating seemingly
useless kernel module.
Indeed, mtdchar is a module that allows user-space making use of MTD devices
via /dev/mtd* character devices. If users do not enable it, they simply cannot
use MTD devices at all. They cannot read or write the flash contents. Is it a
sane and useful setup? I believe not. And everyone just enables mtdchar.
Having mtdchar separate is also a little bit harmful. People sometimes miss the
fact that they need to enable an additional configuration option to have
user-space MTD interfaces, and then they wonder why on earth the kernel does
not allow using the flash? They spend time asking around.
Thus, let's just get rid of this module and make it part of mtd core.
Note, mtdchar had additional configuration option to enable OTP interfaces,
which are present on some flashes. I removed that option as well - it saves a
really tiny amount space.
[dwmw2: Strictly speaking, you can mount file systems on MTD devices just
fine without the mtdchar (or mtdblock) devices; you just can't do
other manipulations directly on the underlying device. But still I
agree that it makes sense to make this unconditional. And Yay! we
get to kill off an instance of checking CONFIG_foo_MODULE, which is
an abomination that should never happen.]
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
2013-03-14 11:27:40 +00:00
|
|
|
int __init init_mtdchar(void)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2010-05-17 13:55:47 +00:00
|
|
|
int ret;
|
2009-03-26 07:42:41 +00:00
|
|
|
|
2010-05-17 13:55:47 +00:00
|
|
|
ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
|
2010-01-29 21:00:04 +00:00
|
|
|
"mtd", &mtd_fops);
|
2010-05-17 13:55:47 +00:00
|
|
|
if (ret < 0) {
|
2013-03-15 10:59:36 +00:00
|
|
|
pr_err("Can't allocate major number %d for MTD\n",
|
|
|
|
MTD_CHAR_MAJOR);
|
2010-05-17 13:55:47 +00:00
|
|
|
return ret;
|
2005-06-30 00:23:27 +00:00
|
|
|
}
|
|
|
|
|
2010-05-17 13:55:47 +00:00
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
mtd: merge mtdchar module with mtdcore
The MTD subsystem has historically tried to be as configurable as possible. The
side-effect of this is that its configuration menu is rather large, and we are
gradually shrinking it. For example, we recently merged partitions support with
the mtdcore.
This patch does the next step - it merges the mtdchar module to mtdcore. And in
this case this is not only about eliminating too fine-grained separation and
simplifying the configuration menu. This is also about eliminating seemingly
useless kernel module.
Indeed, mtdchar is a module that allows user-space making use of MTD devices
via /dev/mtd* character devices. If users do not enable it, they simply cannot
use MTD devices at all. They cannot read or write the flash contents. Is it a
sane and useful setup? I believe not. And everyone just enables mtdchar.
Having mtdchar separate is also a little bit harmful. People sometimes miss the
fact that they need to enable an additional configuration option to have
user-space MTD interfaces, and then they wonder why on earth the kernel does
not allow using the flash? They spend time asking around.
Thus, let's just get rid of this module and make it part of mtd core.
Note, mtdchar had additional configuration option to enable OTP interfaces,
which are present on some flashes. I removed that option as well - it saves a
really tiny amount space.
[dwmw2: Strictly speaking, you can mount file systems on MTD devices just
fine without the mtdchar (or mtdblock) devices; you just can't do
other manipulations directly on the underlying device. But still I
agree that it makes sense to make this unconditional. And Yay! we
get to kill off an instance of checking CONFIG_foo_MODULE, which is
an abomination that should never happen.]
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
2013-03-14 11:27:40 +00:00
|
|
|
void __exit cleanup_mtdchar(void)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2010-01-29 21:00:04 +00:00
|
|
|
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
/* Let udev/modprobe auto-load this driver by character-device major. */
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);