2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2010-08-08 19:58:20 +00:00
|
|
|
* Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2005-11-06 23:14:42 +00:00
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/fs.h>
|
2007-08-10 20:01:06 +00:00
|
|
|
#include <linux/mm.h>
|
2006-10-11 11:52:47 +00:00
|
|
|
#include <linux/err.h>
|
2005-11-06 23:14:42 +00:00
|
|
|
#include <linux/init.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
2005-11-06 23:14:42 +00:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/sched.h>
|
2010-06-02 12:28:52 +00:00
|
|
|
#include <linux/mutex.h>
|
2009-02-12 10:40:00 +00:00
|
|
|
#include <linux/backing-dev.h>
|
2009-04-09 05:53:13 +00:00
|
|
|
#include <linux/compat.h>
|
2010-05-17 13:55:47 +00:00
|
|
|
#include <linux/mount.h>
|
2010-09-17 10:31:42 +00:00
|
|
|
#include <linux/blkpg.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/mtd/mtd.h>
|
2010-09-17 10:31:42 +00:00
|
|
|
#include <linux/mtd/partitions.h>
|
2010-06-15 07:30:15 +00:00
|
|
|
#include <linux/mtd/map.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-11-06 23:14:42 +00:00
|
|
|
#include <asm/uaccess.h>
|
2005-06-30 00:23:27 +00:00
|
|
|
|
2010-05-17 13:55:47 +00:00
|
|
|
/* Superblock magic for the internal pseudo-filesystem backing mtd inodes. */
#define MTD_INODE_FS_MAGIC 0x11307854

/* Serializes the open path (taken in mtd_open() around device lookup). */
static DEFINE_MUTEX(mtd_mutex);

/* Mount of the internal mtd inode filesystem; set up at module init. */
static struct vfsmount *mtd_inode_mnt __read_mostly;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2005-02-08 19:12:53 +00:00
|
|
|
/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;		/* device, held via get_mtd_device() */
	struct inode *ino;		/* inode on the internal mtd fs */
	enum mtd_file_modes mode;	/* normal / OTP / raw access mode */
};
|
2005-02-08 17:45:55 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Seek within the mtd character device.  SEEK_END is relative to the
 * device size; the resulting position must stay within [0, mtd->size]
 * (a position exactly at the size is a legal EOF).
 */
static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *finfo = file->private_data;
	struct mtd_info *dev = finfo->mtd;
	loff_t newpos = offset;

	switch (orig) {
	case SEEK_END:
		newpos += dev->size;
		break;
	case SEEK_CUR:
		newpos += file->f_pos;
		break;
	case SEEK_SET:
		break;
	default:
		return -EINVAL;
	}

	if (newpos < 0 || newpos > dev->size)
		return -EINVAL;

	file->f_pos = newpos;
	return newpos;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static int mtd_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
int minor = iminor(inode);
|
|
|
|
int devnum = minor >> 1;
|
2008-05-15 16:10:37 +00:00
|
|
|
int ret = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct mtd_info *mtd;
|
2006-05-29 22:37:34 +00:00
|
|
|
struct mtd_file_info *mfi;
|
2010-05-17 13:55:47 +00:00
|
|
|
struct inode *mtd_ino;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-19 17:06:09 +00:00
|
|
|
pr_debug("MTD_open\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* You can't open the RO devices RW */
|
2008-09-02 19:28:45 +00:00
|
|
|
if ((file->f_mode & FMODE_WRITE) && (minor & 1))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EACCES;
|
|
|
|
|
2010-06-02 12:28:52 +00:00
|
|
|
mutex_lock(&mtd_mutex);
|
2005-04-16 22:20:36 +00:00
|
|
|
mtd = get_mtd_device(NULL, devnum);
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2008-05-15 16:10:37 +00:00
|
|
|
if (IS_ERR(mtd)) {
|
|
|
|
ret = PTR_ERR(mtd);
|
|
|
|
goto out;
|
|
|
|
}
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2009-02-12 10:40:00 +00:00
|
|
|
if (mtd->type == MTD_ABSENT) {
|
2005-04-16 22:20:36 +00:00
|
|
|
put_mtd_device(mtd);
|
2008-05-15 16:10:37 +00:00
|
|
|
ret = -ENODEV;
|
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2010-05-17 13:55:47 +00:00
|
|
|
mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
|
|
|
|
if (!mtd_ino) {
|
|
|
|
put_mtd_device(mtd);
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (mtd_ino->i_state & I_NEW) {
|
|
|
|
mtd_ino->i_private = mtd;
|
|
|
|
mtd_ino->i_mode = S_IFCHR;
|
|
|
|
mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
|
|
|
|
unlock_new_inode(mtd_ino);
|
|
|
|
}
|
|
|
|
file->f_mapping = mtd_ino->i_mapping;
|
2009-02-12 10:40:00 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* You can't open it RW if it's not a writeable device */
|
2008-09-02 19:28:45 +00:00
|
|
|
if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
|
2010-05-17 13:55:47 +00:00
|
|
|
iput(mtd_ino);
|
2005-04-16 22:20:36 +00:00
|
|
|
put_mtd_device(mtd);
|
2008-05-15 16:10:37 +00:00
|
|
|
ret = -EACCES;
|
|
|
|
goto out;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2006-05-29 22:37:34 +00:00
|
|
|
mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
|
|
|
|
if (!mfi) {
|
2010-05-17 13:55:47 +00:00
|
|
|
iput(mtd_ino);
|
2006-05-29 22:37:34 +00:00
|
|
|
put_mtd_device(mtd);
|
2008-05-15 16:10:37 +00:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto out;
|
2006-05-29 22:37:34 +00:00
|
|
|
}
|
2010-05-17 13:55:47 +00:00
|
|
|
mfi->ino = mtd_ino;
|
2006-05-29 22:37:34 +00:00
|
|
|
mfi->mtd = mtd;
|
|
|
|
file->private_data = mfi;
|
|
|
|
|
2008-05-15 16:10:37 +00:00
|
|
|
out:
|
2010-06-02 12:28:52 +00:00
|
|
|
mutex_unlock(&mtd_mutex);
|
2008-05-15 16:10:37 +00:00
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
} /* mtd_open */
|
|
|
|
|
|
|
|
/*====================================================================*/
|
|
|
|
|
|
|
|
static int mtd_close(struct inode *inode, struct file *file)
|
|
|
|
{
|
2006-05-29 22:37:34 +00:00
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
|
|
|
struct mtd_info *mtd = mfi->mtd;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-07-19 17:06:09 +00:00
|
|
|
pr_debug("MTD_close\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-06-26 22:56:40 +00:00
|
|
|
/* Only sync if opened RW */
|
2008-09-02 19:28:45 +00:00
|
|
|
if ((file->f_mode & FMODE_WRITE) && mtd->sync)
|
2005-04-16 22:20:36 +00:00
|
|
|
mtd->sync(mtd);
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2010-05-17 13:55:47 +00:00
|
|
|
iput(mfi->ino);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
put_mtd_device(mtd);
|
2006-05-29 22:37:34 +00:00
|
|
|
file->private_data = NULL;
|
|
|
|
kfree(mfi);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
} /* mtd_close */
|
|
|
|
|
2011-04-08 15:51:33 +00:00
|
|
|
/* Back in June 2001, dwmw2 wrote:
|
|
|
|
*
|
|
|
|
* FIXME: This _really_ needs to die. In 2.5, we should lock the
|
|
|
|
* userspace buffer down and use it directly with readv/writev.
|
|
|
|
*
|
|
|
|
* The implementation below, using mtd_kmalloc_up_to, mitigates
|
|
|
|
* allocation failures when the system is under low-memory situations
|
|
|
|
* or if memory is highly fragmented at the cost of reducing the
|
|
|
|
* performance of the requested transfer due to a smaller buffer size.
|
|
|
|
*
|
|
|
|
* A more complex but more memory-efficient implementation based on
|
|
|
|
* get_user_pages and iovecs to cover extents of those pages is a
|
|
|
|
* longer-term goal, as intimated by dwmw2 above. However, for the
|
|
|
|
* write case, this requires yet more complex head and tail transfer
|
|
|
|
* handling when those head and tail offsets and sizes are such that
|
|
|
|
* alignment requirements are not met in the NAND subdriver.
|
|
|
|
*/
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/*
 * read() on the character device.  Data is copied through a bounce
 * buffer obtained from mtd_kmalloc_up_to(), which may grant less than
 * requested; the transfer then proceeds in buffer-sized chunks.  The
 * per-file mode selects plain reads, OTP region reads or raw
 * (data+ECC) reads.
 */
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t chunk_max = count;	/* in: wanted; out: granted by allocator */
	size_t done = 0;
	char *kbuf;
	ssize_t status = 0;

	pr_debug("MTD_read\n");

	/* Clamp the request to the end of the device. */
	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;
	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &chunk_max);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		size_t retlen = 0;
		int len = min_t(size_t, count, chunk_max);
		int err;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			err = mtd->read_fact_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			err = mtd->read_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			err = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			err = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * NAND returns -EBADMSG on ECC errors, but it returns the
		 * data anyway.  For our userspace tools it is important to
		 * dump areas with ECC errors!  It might also return -EUCLEAN
		 * to signal that a bitflip has occurred and has been
		 * corrected by the ECC algorithm.  Userspace software which
		 * accesses NAND this way must be aware of the fact that it
		 * deals with NAND.
		 */
		if (err && err != -EUCLEAN && err != -EBADMSG) {
			status = err;
			goto out;
		}

		*ppos += retlen;
		if (copy_to_user(buf, kbuf, retlen)) {
			status = -EFAULT;
			goto out;
		}
		done += retlen;
		count -= retlen;
		buf += retlen;
		/* A zero-length read terminates the loop (short read). */
		if (retlen == 0)
			count = 0;
	}

	status = done;
out:
	kfree(kbuf);
	return status;
} /* mtd_read */
|
|
|
|
|
|
|
|
/*
 * write() on the character device.  Mirrors mtd_read(): data is staged
 * through a bounce buffer and written in chunks according to the
 * per-file mode (plain, user OTP, or raw data+ECC writes).  Factory
 * OTP is read-only and always yields -EROFS.
 */
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t chunk_max = count;	/* allocator may grant less */
	size_t done = 0;
	char *kbuf;
	ssize_t status;

	pr_debug("MTD_write\n");

	/* Writing at EOF reports "no space"; writes past it are clamped. */
	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;
	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &chunk_max);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		size_t retlen = 0;
		int len = min_t(size_t, count, chunk_max);
		int err;

		if (copy_from_user(kbuf, buf, len)) {
			status = -EFAULT;
			goto out;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			/* Factory OTP area is read-only by definition. */
			err = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				err = -EOPNOTSUPP;
				break;
			}
			err = mtd->write_user_prot_reg(mtd, *ppos, len,
						       &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			err = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			err = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}

		if (err) {
			status = err;
			goto out;
		}

		*ppos += retlen;
		done += retlen;
		count -= retlen;
		buf += retlen;
	}

	status = done;
out:
	kfree(kbuf);
	return status;
} /* mtd_write */
|
|
|
|
|
|
|
|
/*======================================================================
|
|
|
|
|
|
|
|
IOCTL calls for getting device parameters.
|
|
|
|
|
|
|
|
======================================================================*/
|
|
|
|
static void mtdchar_erase_callback (struct erase_info *instr)
|
|
|
|
{
|
|
|
|
wake_up((wait_queue_head_t *)instr->priv);
|
|
|
|
}
|
|
|
|
|
2008-07-30 19:35:05 +00:00
|
|
|
#ifdef CONFIG_HAVE_MTD_OTP
/*
 * Switch the per-file OTP access mode.
 *
 * Returns 0 on success, -EOPNOTSUPP when the device lacks the required
 * OTP hook, and -EINVAL for an unknown mode.  MTD_OTP_OFF is accepted
 * and returns 0 without touching mfi->mode (the caller resets it).
 */
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		/*
		 * Probe the *user* OTP hook here; the original tested
		 * read_fact_prot_reg, a copy'n'paste of the factory case
		 * above, so user OTP mode could be entered on devices
		 * that only implement the factory area.
		 */
		if (!mtd->read_user_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
		/* fallthrough */
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif
|
|
|
|
|
2009-04-09 05:53:13 +00:00
|
|
|
static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
|
|
|
|
uint64_t start, uint32_t length, void __user *ptr,
|
|
|
|
uint32_t __user *retp)
|
|
|
|
{
|
2011-08-31 01:45:37 +00:00
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
2009-04-09 05:53:13 +00:00
|
|
|
struct mtd_oob_ops ops;
|
|
|
|
uint32_t retlen;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (!(file->f_mode & FMODE_WRITE))
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
if (length > 4096)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!mtd->write_oob)
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
else
|
2010-01-29 09:35:04 +00:00
|
|
|
ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
|
2009-04-09 05:53:13 +00:00
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ops.ooblen = length;
|
mtd: do not assume oobsize is power of 2
Previous generations of MTDs all used OOB sizes that were powers of 2,
(e.g., 64, 128). However, newer generations of flash, especially NAND,
use irregular OOB sizes that are not powers of 2 (e.g., 218, 224, 448).
This means we cannot use masks like "mtd->oobsize - 1" to assume that we
will get a proper bitmask for OOB operations.
These masks are really only intended to hide the "page" portion of the
offset, leaving any OOB offset intact, so a masking with the writesize
(which *is* always a power of 2) is valid and makes more sense.
This has been tested for read/write of NAND devices (nanddump/nandwrite)
using nandsim and actual NAND flash.
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@intel.com>
2011-08-24 00:17:32 +00:00
|
|
|
ops.ooboffs = start & (mtd->writesize - 1);
|
2009-04-09 05:53:13 +00:00
|
|
|
ops.datbuf = NULL;
|
2011-08-31 01:45:37 +00:00
|
|
|
ops.mode = (mfi->mode == MTD_MODE_RAW) ? MTD_OOB_RAW : MTD_OOB_PLACE;
|
2009-04-09 05:53:13 +00:00
|
|
|
|
|
|
|
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2010-05-22 08:22:49 +00:00
|
|
|
ops.oobbuf = memdup_user(ptr, length);
|
|
|
|
if (IS_ERR(ops.oobbuf))
|
|
|
|
return PTR_ERR(ops.oobbuf);
|
2009-04-09 05:53:13 +00:00
|
|
|
|
mtd: do not assume oobsize is power of 2
Previous generations of MTDs all used OOB sizes that were powers of 2,
(e.g., 64, 128). However, newer generations of flash, especially NAND,
use irregular OOB sizes that are not powers of 2 (e.g., 218, 224, 448).
This means we cannot use masks like "mtd->oobsize - 1" to assume that we
will get a proper bitmask for OOB operations.
These masks are really only intended to hide the "page" portion of the
offset, leaving any OOB offset intact, so a masking with the writesize
(which *is* always a power of 2) is valid and makes more sense.
This has been tested for read/write of NAND devices (nanddump/nandwrite)
using nandsim and actual NAND flash.
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@intel.com>
2011-08-24 00:17:32 +00:00
|
|
|
start &= ~((uint64_t)mtd->writesize - 1);
|
2009-04-09 05:53:13 +00:00
|
|
|
ret = mtd->write_oob(mtd, start, &ops);
|
|
|
|
|
|
|
|
if (ops.oobretlen > 0xFFFFFFFFU)
|
|
|
|
ret = -EOVERFLOW;
|
|
|
|
retlen = ops.oobretlen;
|
|
|
|
if (copy_to_user(retp, &retlen, sizeof(length)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
|
|
|
|
kfree(ops.oobbuf);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
|
|
|
|
uint32_t length, void __user *ptr, uint32_t __user *retp)
|
|
|
|
{
|
|
|
|
struct mtd_oob_ops ops;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (length > 4096)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!mtd->read_oob)
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
else
|
|
|
|
ret = access_ok(VERIFY_WRITE, ptr,
|
|
|
|
length) ? 0 : -EFAULT;
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ops.ooblen = length;
|
mtd: do not assume oobsize is power of 2
Previous generations of MTDs all used OOB sizes that were powers of 2,
(e.g., 64, 128). However, newer generations of flash, especially NAND,
use irregular OOB sizes that are not powers of 2 (e.g., 218, 224, 448).
This means we cannot use masks like "mtd->oobsize - 1" to assume that we
will get a proper bitmask for OOB operations.
These masks are really only intended to hide the "page" portion of the
offset, leaving any OOB offset intact, so a masking with the writesize
(which *is* always a power of 2) is valid and makes more sense.
This has been tested for read/write of NAND devices (nanddump/nandwrite)
using nandsim and actual NAND flash.
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@intel.com>
2011-08-24 00:17:32 +00:00
|
|
|
ops.ooboffs = start & (mtd->writesize - 1);
|
2009-04-09 05:53:13 +00:00
|
|
|
ops.datbuf = NULL;
|
|
|
|
ops.mode = MTD_OOB_PLACE;
|
|
|
|
|
|
|
|
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
ops.oobbuf = kmalloc(length, GFP_KERNEL);
|
|
|
|
if (!ops.oobbuf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
mtd: do not assume oobsize is power of 2
Previous generations of MTDs all used OOB sizes that were powers of 2,
(e.g., 64, 128). However, newer generations of flash, especially NAND,
use irregular OOB sizes that are not powers of 2 (e.g., 218, 224, 448).
This means we cannot use masks like "mtd->oobsize - 1" to assume that we
will get a proper bitmask for OOB operations.
These masks are really only intended to hide the "page" portion of the
offset, leaving any OOB offset intact, so a masking with the writesize
(which *is* always a power of 2) is valid and makes more sense.
This has been tested for read/write of NAND devices (nanddump/nandwrite)
using nandsim and actual NAND flash.
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@intel.com>
2011-08-24 00:17:32 +00:00
|
|
|
start &= ~((uint64_t)mtd->writesize - 1);
|
2009-04-09 05:53:13 +00:00
|
|
|
ret = mtd->read_oob(mtd, start, &ops);
|
|
|
|
|
|
|
|
if (put_user(ops.oobretlen, retp))
|
|
|
|
ret = -EFAULT;
|
|
|
|
else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
|
|
|
|
ops.oobretlen))
|
|
|
|
ret = -EFAULT;
|
|
|
|
|
|
|
|
kfree(ops.oobbuf);
|
2011-06-23 23:45:24 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* NAND returns -EBADMSG on ECC errors, but it returns the OOB
|
|
|
|
* data. For our userspace tools it is important to dump areas
|
|
|
|
* with ECC errors!
|
|
|
|
* For kernel internal usage it also might return -EUCLEAN
|
|
|
|
* to signal the caller that a bitflip has occured and has
|
|
|
|
* been corrected by the ECC algorithm.
|
|
|
|
*
|
2011-06-28 23:29:00 +00:00
|
|
|
* Note: currently the standard NAND function, nand_read_oob_std,
|
|
|
|
* does not calculate ECC for the OOB area, so do not rely on
|
|
|
|
* this behavior unless you have replaced it with your own.
|
2011-06-23 23:45:24 +00:00
|
|
|
*/
|
|
|
|
if (ret == -EUCLEAN || ret == -EBADMSG)
|
|
|
|
return 0;
|
|
|
|
|
2009-04-09 05:53:13 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-08-25 01:12:00 +00:00
|
|
|
/*
|
|
|
|
* Copies (and truncates, if necessary) data from the larger struct,
|
|
|
|
* nand_ecclayout, to the smaller, deprecated layout struct,
|
2011-07-20 16:53:42 +00:00
|
|
|
* nand_ecclayout_user. This is necessary only to support the deprecated
|
2010-08-25 01:12:00 +00:00
|
|
|
* API ioctl ECCGETLAYOUT while allowing all new functionality to use
|
|
|
|
* nand_ecclayout flexibly (i.e. the struct may change size in new
|
|
|
|
* releases without requiring major rewrites).
|
|
|
|
*/
|
|
|
|
static int shrink_ecclayout(const struct nand_ecclayout *from,
|
|
|
|
struct nand_ecclayout_user *to)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!from || !to)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
memset(to, 0, sizeof(*to));
|
|
|
|
|
2010-09-20 06:57:12 +00:00
|
|
|
to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
|
2010-08-25 01:12:00 +00:00
|
|
|
for (i = 0; i < to->eccbytes; i++)
|
|
|
|
to->eccpos[i] = from->eccpos[i];
|
|
|
|
|
|
|
|
for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
|
|
|
|
if (from->oobfree[i].length == 0 &&
|
|
|
|
from->oobfree[i].offset == 0)
|
|
|
|
break;
|
|
|
|
to->oobavail += from->oobfree[i].length;
|
|
|
|
to->oobfree[i] = from->oobfree[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-09-17 10:31:42 +00:00
|
|
|
static int mtd_blkpg_ioctl(struct mtd_info *mtd,
|
|
|
|
struct blkpg_ioctl_arg __user *arg)
|
|
|
|
{
|
|
|
|
struct blkpg_ioctl_arg a;
|
|
|
|
struct blkpg_partition p;
|
|
|
|
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
switch (a.op) {
|
|
|
|
case BLKPG_ADD_PARTITION:
|
|
|
|
|
2010-11-23 12:17:17 +00:00
|
|
|
/* Only master mtd device must be used to add partitions */
|
|
|
|
if (mtd_is_partition(mtd))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2010-09-17 10:31:42 +00:00
|
|
|
return mtd_add_partition(mtd, p.devname, p.start, p.length);
|
|
|
|
|
|
|
|
case BLKPG_DEL_PARTITION:
|
|
|
|
|
|
|
|
if (p.pno < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return mtd_del_partition(mtd, p.pno);
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-04-26 22:24:05 +00:00
|
|
|
static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-05-29 22:37:34 +00:00
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
|
|
|
struct mtd_info *mtd = mfi->mtd;
|
2005-04-16 22:20:36 +00:00
|
|
|
void __user *argp = (void __user *)arg;
|
|
|
|
int ret = 0;
|
|
|
|
u_long size;
|
2006-05-30 12:25:35 +00:00
|
|
|
struct mtd_info_user info;
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2011-07-19 17:06:09 +00:00
|
|
|
pr_debug("MTD_ioctl\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
|
|
|
|
if (cmd & IOC_IN) {
|
|
|
|
if (!access_ok(VERIFY_READ, argp, size))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
if (cmd & IOC_OUT) {
|
|
|
|
if (!access_ok(VERIFY_WRITE, argp, size))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
switch (cmd) {
|
|
|
|
case MEMGETREGIONCOUNT:
|
|
|
|
if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
|
|
|
|
return -EFAULT;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MEMGETREGIONINFO:
|
|
|
|
{
|
2008-09-01 12:02:12 +00:00
|
|
|
uint32_t ur_idx;
|
|
|
|
struct mtd_erase_region_info *kr;
|
2010-01-15 18:25:38 +00:00
|
|
|
struct region_info_user __user *ur = argp;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-09-01 12:02:12 +00:00
|
|
|
if (get_user(ur_idx, &(ur->regionindex)))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
2010-09-08 19:39:56 +00:00
|
|
|
if (ur_idx >= mtd->numeraseregions)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2008-09-01 12:02:12 +00:00
|
|
|
kr = &(mtd->eraseregions[ur_idx]);
|
|
|
|
|
|
|
|
if (put_user(kr->offset, &(ur->offset))
|
|
|
|
|| put_user(kr->erasesize, &(ur->erasesize))
|
|
|
|
|| put_user(kr->numblocks, &(ur->numblocks)))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EFAULT;
|
2008-09-01 12:02:12 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MEMGETINFO:
|
2010-11-06 14:41:24 +00:00
|
|
|
memset(&info, 0, sizeof(info));
|
2006-05-30 12:25:35 +00:00
|
|
|
info.type = mtd->type;
|
|
|
|
info.flags = mtd->flags;
|
|
|
|
info.size = mtd->size;
|
|
|
|
info.erasesize = mtd->erasesize;
|
|
|
|
info.writesize = mtd->writesize;
|
|
|
|
info.oobsize = mtd->oobsize;
|
2007-01-30 08:50:43 +00:00
|
|
|
/* The below fields are obsolete */
|
|
|
|
info.ecctype = -1;
|
2006-05-30 12:25:35 +00:00
|
|
|
if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EFAULT;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MEMERASE:
|
2009-04-09 05:52:28 +00:00
|
|
|
case MEMERASE64:
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
struct erase_info *erase;
|
|
|
|
|
2008-09-02 19:28:45 +00:00
|
|
|
if(!(file->f_mode & FMODE_WRITE))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EPERM;
|
|
|
|
|
2006-11-15 19:10:29 +00:00
|
|
|
erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (!erase)
|
|
|
|
ret = -ENOMEM;
|
|
|
|
else {
|
|
|
|
wait_queue_head_t waitq;
|
|
|
|
DECLARE_WAITQUEUE(wait, current);
|
|
|
|
|
|
|
|
init_waitqueue_head(&waitq);
|
|
|
|
|
2009-04-09 05:52:28 +00:00
|
|
|
if (cmd == MEMERASE64) {
|
|
|
|
struct erase_info_user64 einfo64;
|
|
|
|
|
|
|
|
if (copy_from_user(&einfo64, argp,
|
|
|
|
sizeof(struct erase_info_user64))) {
|
|
|
|
kfree(erase);
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
erase->addr = einfo64.start;
|
|
|
|
erase->len = einfo64.length;
|
|
|
|
} else {
|
|
|
|
struct erase_info_user einfo32;
|
|
|
|
|
|
|
|
if (copy_from_user(&einfo32, argp,
|
|
|
|
sizeof(struct erase_info_user))) {
|
|
|
|
kfree(erase);
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
erase->addr = einfo32.start;
|
|
|
|
erase->len = einfo32.length;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
erase->mtd = mtd;
|
|
|
|
erase->callback = mtdchar_erase_callback;
|
|
|
|
erase->priv = (unsigned long)&waitq;
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
FIXME: Allow INTERRUPTIBLE. Which means
|
|
|
|
not having the wait_queue head on the stack.
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
If the wq_head is on the stack, and we
|
|
|
|
leave because we got interrupted, then the
|
|
|
|
wq_head is no longer there when the
|
|
|
|
callback routine tries to wake us up.
|
|
|
|
*/
|
|
|
|
ret = mtd->erase(mtd, erase);
|
|
|
|
if (!ret) {
|
|
|
|
set_current_state(TASK_UNINTERRUPTIBLE);
|
|
|
|
add_wait_queue(&waitq, &wait);
|
|
|
|
if (erase->state != MTD_ERASE_DONE &&
|
|
|
|
erase->state != MTD_ERASE_FAILED)
|
|
|
|
schedule();
|
|
|
|
remove_wait_queue(&waitq, &wait);
|
|
|
|
set_current_state(TASK_RUNNING);
|
|
|
|
|
|
|
|
ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
|
|
|
|
}
|
|
|
|
kfree(erase);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MEMWRITEOOB:
|
|
|
|
{
|
|
|
|
struct mtd_oob_buf buf;
|
2009-04-09 05:53:13 +00:00
|
|
|
struct mtd_oob_buf __user *buf_user = argp;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-04-09 05:53:13 +00:00
|
|
|
/* NOTE: writes return length to buf_user->length */
|
|
|
|
if (copy_from_user(&buf, argp, sizeof(buf)))
|
2005-04-16 22:20:36 +00:00
|
|
|
ret = -EFAULT;
|
2009-04-09 05:53:13 +00:00
|
|
|
else
|
|
|
|
ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
|
|
|
|
buf.ptr, &buf_user->length);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MEMREADOOB:
|
|
|
|
{
|
|
|
|
struct mtd_oob_buf buf;
|
2009-04-09 05:53:13 +00:00
|
|
|
struct mtd_oob_buf __user *buf_user = argp;
|
2006-05-29 01:26:58 +00:00
|
|
|
|
2009-04-09 05:53:13 +00:00
|
|
|
/* NOTE: writes return length to buf_user->start */
|
|
|
|
if (copy_from_user(&buf, argp, sizeof(buf)))
|
2005-04-16 22:20:36 +00:00
|
|
|
ret = -EFAULT;
|
2009-04-09 05:53:13 +00:00
|
|
|
else
|
|
|
|
ret = mtd_do_readoob(mtd, buf.start, buf.length,
|
|
|
|
buf.ptr, &buf_user->start);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-04-09 05:53:49 +00:00
|
|
|
case MEMWRITEOOB64:
|
|
|
|
{
|
|
|
|
struct mtd_oob_buf64 buf;
|
|
|
|
struct mtd_oob_buf64 __user *buf_user = argp;
|
|
|
|
|
|
|
|
if (copy_from_user(&buf, argp, sizeof(buf)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
else
|
|
|
|
ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
|
|
|
|
(void __user *)(uintptr_t)buf.usr_ptr,
|
|
|
|
&buf_user->length);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MEMREADOOB64:
|
|
|
|
{
|
|
|
|
struct mtd_oob_buf64 buf;
|
|
|
|
struct mtd_oob_buf64 __user *buf_user = argp;
|
|
|
|
|
|
|
|
if (copy_from_user(&buf, argp, sizeof(buf)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
else
|
|
|
|
ret = mtd_do_readoob(mtd, buf.start, buf.length,
|
|
|
|
(void __user *)(uintptr_t)buf.usr_ptr,
|
|
|
|
&buf_user->length);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
case MEMLOCK:
|
|
|
|
{
|
2008-07-04 06:40:14 +00:00
|
|
|
struct erase_info_user einfo;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-04 06:40:14 +00:00
|
|
|
if (copy_from_user(&einfo, argp, sizeof(einfo)))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (!mtd->lock)
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
else
|
2008-07-04 06:40:14 +00:00
|
|
|
ret = mtd->lock(mtd, einfo.start, einfo.length);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MEMUNLOCK:
|
|
|
|
{
|
2008-07-04 06:40:14 +00:00
|
|
|
struct erase_info_user einfo;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-04 06:40:14 +00:00
|
|
|
if (copy_from_user(&einfo, argp, sizeof(einfo)))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (!mtd->unlock)
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
else
|
2008-07-04 06:40:14 +00:00
|
|
|
ret = mtd->unlock(mtd, einfo.start, einfo.length);
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-06-14 16:10:33 +00:00
|
|
|
case MEMISLOCKED:
|
|
|
|
{
|
|
|
|
struct erase_info_user einfo;
|
|
|
|
|
|
|
|
if (copy_from_user(&einfo, argp, sizeof(einfo)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (!mtd->is_locked)
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
else
|
|
|
|
ret = mtd->is_locked(mtd, einfo.start, einfo.length);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2006-05-27 20:16:10 +00:00
|
|
|
/* Legacy interface */
|
2005-04-16 22:20:36 +00:00
|
|
|
case MEMGETOOBSEL:
|
|
|
|
{
|
2006-05-27 20:16:10 +00:00
|
|
|
struct nand_oobinfo oi;
|
|
|
|
|
|
|
|
if (!mtd->ecclayout)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
oi.useecc = MTD_NANDECC_AUTOPLACE;
|
|
|
|
memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
|
|
|
|
memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
|
|
|
|
sizeof(oi.oobfree));
|
2006-10-17 15:27:11 +00:00
|
|
|
oi.eccbytes = mtd->ecclayout->eccbytes;
|
2006-05-27 20:16:10 +00:00
|
|
|
|
|
|
|
if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
|
2005-04-16 22:20:36 +00:00
|
|
|
return -EFAULT;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MEMGETBADBLOCK:
|
|
|
|
{
|
|
|
|
loff_t offs;
|
2005-11-07 11:15:26 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (copy_from_user(&offs, argp, sizeof(loff_t)))
|
|
|
|
return -EFAULT;
|
|
|
|
if (!mtd->block_isbad)
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
else
|
|
|
|
return mtd->block_isbad(mtd, offs);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MEMSETBADBLOCK:
|
|
|
|
{
|
|
|
|
loff_t offs;
|
|
|
|
|
|
|
|
if (copy_from_user(&offs, argp, sizeof(loff_t)))
|
|
|
|
return -EFAULT;
|
|
|
|
if (!mtd->block_markbad)
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
else
|
|
|
|
return mtd->block_markbad(mtd, offs);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2008-07-30 19:35:05 +00:00
|
|
|
#ifdef CONFIG_HAVE_MTD_OTP
|
2005-02-08 17:45:55 +00:00
|
|
|
case OTPSELECT:
|
|
|
|
{
|
|
|
|
int mode;
|
|
|
|
if (copy_from_user(&mode, argp, sizeof(int)))
|
|
|
|
return -EFAULT;
|
2006-05-29 22:37:34 +00:00
|
|
|
|
|
|
|
mfi->mode = MTD_MODE_NORMAL;
|
|
|
|
|
|
|
|
ret = otp_select_filemode(mfi, mode);
|
|
|
|
|
2005-04-01 15:36:15 +00:00
|
|
|
file->f_pos = 0;
|
2005-02-08 17:45:55 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case OTPGETREGIONCOUNT:
|
|
|
|
case OTPGETREGIONINFO:
|
|
|
|
{
|
|
|
|
struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
|
|
|
|
if (!buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
ret = -EOPNOTSUPP;
|
2006-05-29 22:37:34 +00:00
|
|
|
switch (mfi->mode) {
|
|
|
|
case MTD_MODE_OTP_FACTORY:
|
2005-02-08 17:45:55 +00:00
|
|
|
if (mtd->get_fact_prot_info)
|
|
|
|
ret = mtd->get_fact_prot_info(mtd, buf, 4096);
|
|
|
|
break;
|
|
|
|
case MTD_MODE_OTP_USER:
|
|
|
|
if (mtd->get_user_prot_info)
|
|
|
|
ret = mtd->get_user_prot_info(mtd, buf, 4096);
|
|
|
|
break;
|
2006-05-29 22:37:34 +00:00
|
|
|
default:
|
|
|
|
break;
|
2005-02-08 17:45:55 +00:00
|
|
|
}
|
|
|
|
if (ret >= 0) {
|
|
|
|
if (cmd == OTPGETREGIONCOUNT) {
|
|
|
|
int nbr = ret / sizeof(struct otp_info);
|
|
|
|
ret = copy_to_user(argp, &nbr, sizeof(int));
|
|
|
|
} else
|
|
|
|
ret = copy_to_user(argp, buf, ret);
|
|
|
|
if (ret)
|
|
|
|
ret = -EFAULT;
|
|
|
|
}
|
|
|
|
kfree(buf);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case OTPLOCK:
|
|
|
|
{
|
2008-07-04 06:40:14 +00:00
|
|
|
struct otp_info oinfo;
|
2005-02-08 17:45:55 +00:00
|
|
|
|
2006-05-29 22:37:34 +00:00
|
|
|
if (mfi->mode != MTD_MODE_OTP_USER)
|
2005-02-08 17:45:55 +00:00
|
|
|
return -EINVAL;
|
2008-07-04 06:40:14 +00:00
|
|
|
if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
|
2005-02-08 17:45:55 +00:00
|
|
|
return -EFAULT;
|
|
|
|
if (!mtd->lock_user_prot_reg)
|
|
|
|
return -EOPNOTSUPP;
|
2008-07-04 06:40:14 +00:00
|
|
|
ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
|
2005-02-08 17:45:55 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2011-06-23 21:12:08 +00:00
|
|
|
/* This ioctl is being deprecated - it truncates the ECC layout */
|
2006-05-29 22:37:34 +00:00
|
|
|
case ECCGETLAYOUT:
|
|
|
|
{
|
2010-08-25 01:12:00 +00:00
|
|
|
struct nand_ecclayout_user *usrlay;
|
|
|
|
|
2006-05-29 22:37:34 +00:00
|
|
|
if (!mtd->ecclayout)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2010-08-25 01:12:00 +00:00
|
|
|
usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
|
|
|
|
if (!usrlay)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
shrink_ecclayout(mtd->ecclayout, usrlay);
|
|
|
|
|
|
|
|
if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
kfree(usrlay);
|
2006-05-29 22:37:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ECCGETSTATS:
|
|
|
|
{
|
|
|
|
if (copy_to_user(argp, &mtd->ecc_stats,
|
|
|
|
sizeof(struct mtd_ecc_stats)))
|
|
|
|
return -EFAULT;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MTDFILEMODE:
|
|
|
|
{
|
|
|
|
mfi->mode = 0;
|
|
|
|
|
|
|
|
switch(arg) {
|
|
|
|
case MTD_MODE_OTP_FACTORY:
|
|
|
|
case MTD_MODE_OTP_USER:
|
|
|
|
ret = otp_select_filemode(mfi, arg);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MTD_MODE_RAW:
|
|
|
|
if (!mtd->read_oob || !mtd->write_oob)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
mfi->mode = arg;
|
|
|
|
|
|
|
|
case MTD_MODE_NORMAL:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ret = -EINVAL;
|
|
|
|
}
|
|
|
|
file->f_pos = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-09-17 10:31:42 +00:00
|
|
|
case BLKPG:
|
|
|
|
{
|
|
|
|
ret = mtd_blkpg_ioctl(mtd,
|
|
|
|
(struct blkpg_ioctl_arg __user *)arg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case BLKRRPART:
|
|
|
|
{
|
|
|
|
/* No reread partition feature. Just return ok */
|
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
default:
|
|
|
|
ret = -ENOTTY;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
} /* memory_ioctl */
|
|
|
|
|
2010-04-26 22:24:05 +00:00
|
|
|
/*
 * Serialised ioctl entry point for the mtd character devices: every
 * command is dispatched to mtd_ioctl() under mtd_mutex.
 */
static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int rc;

	mutex_lock(&mtd_mutex);
	rc = mtd_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return rc;
}
|
|
|
|
|
2009-04-09 05:53:13 +00:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
|
|
|
|
/*
 * 32-bit user-space layout of struct mtd_oob_buf as seen through the
 * compat ioctl path: the buffer pointer is a compat_caddr_t rather than
 * a native pointer, so the struct size differs from the 64-bit one.
 */
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

/* Compat encodings of MEMWRITEOOB/MEMREADOOB built on the 32-bit layout. */
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
|
|
|
|
|
|
|
|
static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
|
|
|
|
unsigned long arg)
|
|
|
|
{
|
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
|
|
|
struct mtd_info *mtd = mfi->mtd;
|
2009-05-29 15:09:08 +00:00
|
|
|
void __user *argp = compat_ptr(arg);
|
2009-04-09 05:53:13 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
2010-06-02 12:28:52 +00:00
|
|
|
mutex_lock(&mtd_mutex);
|
2009-04-09 05:53:13 +00:00
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case MEMWRITEOOB32:
|
|
|
|
{
|
|
|
|
struct mtd_oob_buf32 buf;
|
|
|
|
struct mtd_oob_buf32 __user *buf_user = argp;
|
|
|
|
|
|
|
|
if (copy_from_user(&buf, argp, sizeof(buf)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
else
|
|
|
|
ret = mtd_do_writeoob(file, mtd, buf.start,
|
|
|
|
buf.length, compat_ptr(buf.ptr),
|
|
|
|
&buf_user->length);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MEMREADOOB32:
|
|
|
|
{
|
|
|
|
struct mtd_oob_buf32 buf;
|
|
|
|
struct mtd_oob_buf32 __user *buf_user = argp;
|
|
|
|
|
|
|
|
/* NOTE: writes return length to buf->start */
|
|
|
|
if (copy_from_user(&buf, argp, sizeof(buf)))
|
|
|
|
ret = -EFAULT;
|
|
|
|
else
|
|
|
|
ret = mtd_do_readoob(mtd, buf.start,
|
|
|
|
buf.length, compat_ptr(buf.ptr),
|
|
|
|
&buf_user->start);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
2010-04-26 22:24:05 +00:00
|
|
|
ret = mtd_ioctl(file, cmd, (unsigned long)argp);
|
2009-04-09 05:53:13 +00:00
|
|
|
}
|
|
|
|
|
2010-06-02 12:28:52 +00:00
|
|
|
mutex_unlock(&mtd_mutex);
|
2009-04-09 05:53:13 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_COMPAT */
|
|
|
|
|
2009-02-12 10:40:00 +00:00
|
|
|
/*
|
|
|
|
* try to determine where a shared mapping can be made
|
|
|
|
 * - only supported for NOMMU at the moment (MMU can't copy private
|
|
|
|
* mappings)
|
|
|
|
*/
|
|
|
|
#ifndef CONFIG_MMU
|
|
|
|
static unsigned long mtd_get_unmapped_area(struct file *file,
|
|
|
|
unsigned long addr,
|
|
|
|
unsigned long len,
|
|
|
|
unsigned long pgoff,
|
|
|
|
unsigned long flags)
|
|
|
|
{
|
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
|
|
|
struct mtd_info *mtd = mfi->mtd;
|
|
|
|
|
|
|
|
if (mtd->get_unmapped_area) {
|
|
|
|
unsigned long offset;
|
|
|
|
|
|
|
|
if (addr != 0)
|
|
|
|
return (unsigned long) -EINVAL;
|
|
|
|
|
|
|
|
if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
|
|
|
|
return (unsigned long) -EINVAL;
|
|
|
|
|
|
|
|
offset = pgoff << PAGE_SHIFT;
|
|
|
|
if (offset > mtd->size - len)
|
|
|
|
return (unsigned long) -EINVAL;
|
|
|
|
|
|
|
|
return mtd->get_unmapped_area(mtd, len, offset, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* can't map directly */
|
|
|
|
return (unsigned long) -ENOSYS;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* set up a mapping for shared memory segments
|
|
|
|
*/
|
|
|
|
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_MMU
|
|
|
|
struct mtd_file_info *mfi = file->private_data;
|
|
|
|
struct mtd_info *mtd = mfi->mtd;
|
2010-06-15 07:30:15 +00:00
|
|
|
struct map_info *map = mtd->priv;
|
|
|
|
unsigned long start;
|
|
|
|
unsigned long off;
|
|
|
|
u32 len;
|
|
|
|
|
|
|
|
if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
|
|
|
|
off = vma->vm_pgoff << PAGE_SHIFT;
|
|
|
|
start = map->phys;
|
|
|
|
len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
|
|
|
|
start &= PAGE_MASK;
|
|
|
|
if ((vma->vm_end - vma->vm_start + off) > len)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
off += start;
|
|
|
|
vma->vm_pgoff = off >> PAGE_SHIFT;
|
|
|
|
vma->vm_flags |= VM_IO | VM_RESERVED;
|
|
|
|
|
|
|
|
#ifdef pgprot_noncached
|
|
|
|
if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
|
|
|
|
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
|
|
|
#endif
|
|
|
|
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
|
|
|
|
vma->vm_end - vma->vm_start,
|
|
|
|
vma->vm_page_prot))
|
|
|
|
return -EAGAIN;
|
2009-02-12 10:40:00 +00:00
|
|
|
|
|
|
|
return 0;
|
2010-06-15 07:30:15 +00:00
|
|
|
}
|
2009-02-12 10:40:00 +00:00
|
|
|
return -ENOSYS;
|
|
|
|
#else
|
|
|
|
return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2007-02-12 08:55:34 +00:00
|
|
|
/* File operations backing the /dev/mtdN character devices. */
static const struct file_operations mtd_fops = {
	.owner = THIS_MODULE,
	.llseek = mtd_lseek,
	.read = mtd_read,
	.write = mtd_write,
	.unlocked_ioctl = mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mtd_compat_ioctl,
#endif
	.open = mtd_open,
	.release = mtd_close,
	.mmap = mtd_mmap,
#ifndef CONFIG_MMU
	/* NOMMU needs an explicit placement hook for mmap */
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};
|
|
|
|
|
2010-07-25 19:47:46 +00:00
|
|
|
/*
 * Mount callback for the internal pseudo-filesystem whose inodes back
 * the mtd character devices (one inode per device index).
 */
static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
}
|
|
|
|
|
|
|
|
/* The internal, kernel-mounted pseudo-filesystem for mtd inodes. */
static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.mount = mtd_inodefs_mount,
	.kill_sb = kill_anon_super,
};
|
|
|
|
|
|
|
|
/* MTD device-added notification: intentionally a no-op here. */
static void mtdchar_notify_add(struct mtd_info *mtd)
{
}
|
|
|
|
|
|
|
|
/*
 * MTD device-removed notification: if an inode for the departing device
 * is cached in mtd_inodefs (looked up by device index), force it to be
 * destroyed rather than left dangling.
 */
static void mtdchar_notify_remove(struct mtd_info *mtd)
{
	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);

	if (mtd_ino) {
		/* Destroy the inode if it exists: zeroing i_nlink makes
		 * the final iput() evict it instead of caching it */
		mtd_ino->i_nlink = 0;
		iput(mtd_ino);
	}
}
|
|
|
|
|
|
|
|
/* Hook add/remove events from the MTD core into the handlers above. */
static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Module init: claim the MTD character-device major, register and mount
 * the internal inode filesystem, then subscribe to MTD add/remove
 * notifications.  Unwinds in reverse order on failure.
 */
static int __init init_mtdchar(void)
{
	int err;

	err = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (err < 0) {
		pr_notice("Can't allocate major number %d for "
			  "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
		return err;
	}

	err = register_filesystem(&mtd_inodefs_type);
	if (err) {
		pr_notice("Can't register mtd_inodefs filesystem: %d\n", err);
		goto out_chrdev;
	}

	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
	if (IS_ERR(mtd_inode_mnt)) {
		err = PTR_ERR(mtd_inode_mnt);
		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", err);
		goto out_filesystem;
	}

	register_mtd_user(&mtdchar_notifier);
	return 0;

out_filesystem:
	unregister_filesystem(&mtd_inodefs_type);
out_chrdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return err;
}
|
|
|
|
|
|
|
|
/* Module exit: tear everything down in strict reverse of init_mtdchar(). */
static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	kern_unmount(mtd_inode_mnt);
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
|
|
|
|
|
|
|
|
module_init(init_mtdchar);
|
|
|
|
module_exit(cleanup_mtdchar);
|
|
|
|
|
2009-03-26 07:42:41 +00:00
|
|
|
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
|
|
|
|
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
|
2009-03-02 18:42:39 +00:00
|
|
|
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
|