linux/drivers/mtd/spi-nor/spi-nor.c


/*
* Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
* influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*
* This code is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
/* Define max times to check status register before we give up. */
/*
* For everything but full-chip erase; probably could be much smaller, but kept
* around for safety for now
*/
#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
/*
* For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
* for larger flash
*/
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
#define SPI_NOR_MAX_ID_LEN 6
#define SPI_NOR_MAX_ADDR_WIDTH 4
struct flash_info {
char *name;
/*
* This array stores the ID bytes.
* The first three bytes are the JEDEC ID.
* JEDEC ID zero means "no ID" (mostly older chips).
*/
u8 id[SPI_NOR_MAX_ID_LEN];
u8 id_len;
/* The size listed here is what works with SPINOR_OP_SE, which isn't
* necessarily called a "sector" by the vendor.
*/
unsigned sector_size;
u16 n_sectors;
u16 page_size;
u16 addr_width;
u16 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* use SST byte programming */
#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
#define USE_FSR BIT(7) /* use flag status register */
#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
#define SPI_NOR_HAS_TB BIT(9) /*
* Flash SR has Top/Bottom (TB) protect
* bit. Must be used with
* SPI_NOR_HAS_LOCK.
*/
};
#define JEDEC_MFR(info) ((info)->id[0])
static const struct flash_info *spi_nor_match_id(const char *name);
/*
* Read the status register.
* Return the status register value, or a negative value if an error occurred.
*/
static int read_sr(struct spi_nor *nor)
{
int ret;
u8 val;
ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
if (ret < 0) {
pr_err("error %d reading SR\n", (int) ret);
return ret;
}
return val;
}
/*
* Read the flag status register.
* Return the flag status register value, or a negative value if an error
* occurred.
*/
static int read_fsr(struct spi_nor *nor)
{
int ret;
u8 val;
ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
if (ret < 0) {
pr_err("error %d reading FSR\n", ret);
return ret;
}
return val;
}
/*
* Read the configuration register.
* Return the configuration register value, or a negative value if an error
* occurred.
*/
static int read_cr(struct spi_nor *nor)
{
int ret;
u8 val;
ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
if (ret < 0) {
dev_err(nor->dev, "error %d reading CR\n", ret);
return ret;
}
return val;
}
/*
* Dummy Cycle calculation for different type of read.
* It can be used to support more commands with
* different dummy cycle requirements.
*/
static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
{
switch (nor->flash_read) {
case SPI_NOR_FAST:
case SPI_NOR_DUAL:
case SPI_NOR_QUAD:
return 8;
case SPI_NOR_NORMAL:
return 0;
}
return 0;
}
/*
* Write status register 1 byte
* Returns negative if error occurred.
*/
static inline int write_sr(struct spi_nor *nor, u8 val)
{
nor->cmd_buf[0] = val;
return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
}
/*
* Set write enable latch with Write Enable command.
* Returns negative if error occurred.
*/
static inline int write_enable(struct spi_nor *nor)
{
return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
}
/*
* Send write disable instruction to the chip.
*/
static inline int write_disable(struct spi_nor *nor)
{
return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
}
static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
return mtd->priv;
}
/* Enable/disable 4-byte addressing mode. */
static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
int enable)
{
int status;
bool need_wren = false;
u8 cmd;
switch (JEDEC_MFR(info)) {
case SNOR_MFR_MICRON:
/* Some Micron need WREN command; all will accept it */
need_wren = true;
case SNOR_MFR_MACRONIX:
case SNOR_MFR_WINBOND:
if (need_wren)
write_enable(nor);
cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
status = nor->write_reg(nor, cmd, NULL, 0);
if (need_wren)
write_disable(nor);
return status;
default:
/* Spansion style */
nor->cmd_buf[0] = enable << 7;
return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
}
}
static inline int spi_nor_sr_ready(struct spi_nor *nor)
{
int sr = read_sr(nor);
if (sr < 0)
return sr;
else
return !(sr & SR_WIP);
}
static inline int spi_nor_fsr_ready(struct spi_nor *nor)
{
int fsr = read_fsr(nor);
if (fsr < 0)
return fsr;
else
return fsr & FSR_READY;
}
static int spi_nor_ready(struct spi_nor *nor)
{
int sr, fsr;
sr = spi_nor_sr_ready(nor);
if (sr < 0)
return sr;
fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
if (fsr < 0)
return fsr;
return sr && fsr;
}
/*
* Service routine to read status register until ready, or timeout occurs.
* Returns non-zero if error.
*/
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
unsigned long timeout_jiffies)
{
unsigned long deadline;
int timeout = 0, ret;
deadline = jiffies + timeout_jiffies;
while (!timeout) {
if (time_after_eq(jiffies, deadline))
timeout = 1;
ret = spi_nor_ready(nor);
if (ret < 0)
return ret;
if (ret)
return 0;
cond_resched();
}
dev_err(nor->dev, "flash operation timed out\n");
return -ETIMEDOUT;
}
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
return spi_nor_wait_till_ready_with_timeout(nor,
DEFAULT_READY_WAIT_JIFFIES);
}
/*
* Erase the whole flash memory
*
* Returns 0 if successful, non-zero otherwise.
*/
static int erase_chip(struct spi_nor *nor)
{
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
}
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
{
int ret = 0;
mutex_lock(&nor->lock);
if (nor->prepare) {
ret = nor->prepare(nor, ops);
if (ret) {
dev_err(nor->dev, "failed in the preparation.\n");
mutex_unlock(&nor->lock);
return ret;
}
}
return ret;
}
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
if (nor->unprepare)
nor->unprepare(nor, ops);
mutex_unlock(&nor->lock);
}
/*
* Initiate the erasure of a single sector
*/
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
int i;
if (nor->erase)
return nor->erase(nor, addr);
/*
* Default implementation, if driver doesn't have a specialized HW
* control
*/
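/*
 * For example, with addr_width == 3 and addr == 0x123456 the loop below
 * fills buf[] = { 0x12, 0x34, 0x56 } (most significant byte first).
 */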
for (i = nor->addr_width - 1; i >= 0; i--) {
buf[i] = addr & 0xff;
addr >>= 8;
}
return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
}
/*
* Erase an address range on the nor chip. The address range may span
* one or more erase sectors. Return an error if there is a problem erasing.
*/
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
u32 addr, len;
uint32_t rem;
int ret;
dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
(long long)instr->len);
div_u64_rem(instr->len, mtd->erasesize, &rem);
if (rem)
return -EINVAL;
addr = instr->addr;
len = instr->len;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
if (ret)
return ret;
/* whole-chip erase? */
if (len == mtd->size) {
unsigned long timeout;
write_enable(nor);
if (erase_chip(nor)) {
ret = -EIO;
goto erase_err;
}
/*
* Scale the timeout linearly with the size of the flash, with
* a minimum calibrated to an old 2MB flash. We could try to
* pull these from CFI/SFDP, but these values should be good
* enough for now.
*/
timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
(unsigned long)(mtd->size / SZ_2M));
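/* e.g. a 16 MiB part is allowed max(40 s, 8 * 40 s) = 320 s to finish */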
ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
if (ret)
goto erase_err;
/* REVISIT in some cases we could speed up erasing large regions
* by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
* to use "small sector erase", but that's not always optimal.
*/
/* "sector"-at-a-time erase */
} else {
while (len) {
write_enable(nor);
ret = spi_nor_erase_sector(nor, addr);
if (ret)
goto erase_err;
addr += mtd->erasesize;
len -= mtd->erasesize;
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto erase_err;
}
}
write_disable(nor);
erase_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
mtd_erase_callback(instr);
return ret;
}
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
struct mtd_info *mtd = &nor->mtd;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
int shift = ffs(mask) - 1;
int pow;
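/*
 * Example: with BP2..BP0 = 1,0,1 and TB clear on a 16 MiB part, the BP
 * field gives pow = 0b101 ^ 0b111 = 0b010 = 2, so *len = size >> 2 = 4 MiB
 * and *ofs = 12 MiB -- i.e. the upper quarter of the flash is protected.
 */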
if (!(sr & mask)) {
/* No protection */
*ofs = 0;
*len = 0;
} else {
pow = ((sr & mask) ^ mask) >> shift;
*len = mtd->size >> pow;
if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
*ofs = 0;
else
*ofs = mtd->size - *len;
}
}
/*
* Return 1 if the entire region is locked (if @locked is true) or unlocked (if
* @locked is false); 0 otherwise
*/
static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
u8 sr, bool locked)
{
loff_t lock_offs;
uint64_t lock_len;
if (!len)
return 1;
stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
if (locked)
/* Requested range is a sub-range of locked range */
return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
else
/* Requested range does not overlap with locked range */
return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
}
static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
u8 sr)
{
return stm_check_lock_status_sr(nor, ofs, len, sr, true);
}
static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
u8 sr)
{
return stm_check_lock_status_sr(nor, ofs, len, sr, false);
}
/*
* Lock a region of the flash. Compatible with ST Micro and similar flash.
* Supports the block protection bits BP{0,1,2} in the status register
* (SR). Does not support these features found in newer SR bitfields:
* - SEC: sector/block protect - only handle SEC=0 (block protect)
* - CMP: complement protect - only support CMP=0 (range is not complemented)
*
* Support for the following is provided conditionally for some flash:
* - TB: top/bottom protect
*
* Sample table portion for 8MB flash (Winbond w25q64fw):
*
* SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
* --------------------------------------------------------------------------
* X | X | 0 | 0 | 0 | NONE | NONE
* 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
* 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
* 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
* 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
* 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
* 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
* X | X | 1 | 1 | 1 | 8 MB | ALL
* ------|-------|-------|-------|-------|---------------|-------------------
* 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
* 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
* 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
* 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
* 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
* 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
*
* Returns negative on errors, 0 on success.
*/
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
int status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
int ret;
status_old = read_sr(nor);
if (status_old < 0)
return status_old;
/* If nothing in our range is unlocked, we don't need to do anything */
if (stm_is_locked_sr(nor, ofs, len, status_old))
return 0;
/* If anything below us is unlocked, we can't use 'bottom' protection */
if (!stm_is_locked_sr(nor, 0, ofs, status_old))
can_be_bottom = false;
/* If anything above us is unlocked, we can't use 'top' protection */
if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
status_old))
can_be_top = false;
if (!can_be_bottom && !can_be_top)
return -EINVAL;
/* Prefer top, if both are valid */
use_top = can_be_top;
/* lock_len: length of region that should end up locked */
if (use_top)
lock_len = mtd->size - ofs;
else
lock_len = ofs + len;
/*
* Need smallest pow such that:
*
* 1 / (2^pow) <= (len / size)
*
* so (assuming power-of-2 size) we do:
*
* pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
*/
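/*
 * Example: locking the top 2 MiB of an 8 MiB part gives lock_len = 2 MiB,
 * pow = ilog2(8 MiB) - ilog2(2 MiB) = 23 - 21 = 2, so the BP field becomes
 * 0b111 - 0b010 = 0b101 -- "Upper 1/4" (2 MB) in the table above.
 */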
pow = ilog2(mtd->size) - ilog2(lock_len);
val = mask - (pow << shift);
if (val & ~mask)
return -EINVAL;
/* Don't "lock" with no region! */
if (!(val & mask))
return -EINVAL;
status_new = (status_old & ~mask & ~SR_TB) | val;
/* Disallow further writes if WP pin is asserted */
status_new |= SR_SRWD;
if (!use_top)
status_new |= SR_TB;
/* Don't bother if they're the same */
if (status_new == status_old)
return 0;
/* Only modify protection if it will not unlock other areas */
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
write_enable(nor);
ret = write_sr(nor, status_new);
if (ret)
return ret;
return spi_nor_wait_till_ready(nor);
}
/*
* Unlock a region of the flash. See stm_lock() for more info
*
* Returns negative on errors, 0 on success.
*/
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
struct mtd_info *mtd = &nor->mtd;
int status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
int ret;
status_old = read_sr(nor);
if (status_old < 0)
return status_old;
/* If nothing in our range is locked, we don't need to do anything */
if (stm_is_unlocked_sr(nor, ofs, len, status_old))
return 0;
/* If anything below us is locked, we can't use 'top' protection */
if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
can_be_top = false;
/* If anything above us is locked, we can't use 'bottom' protection */
if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
status_old))
can_be_bottom = false;
if (!can_be_bottom && !can_be_top)
return -EINVAL;
/* Prefer top, if both are valid */
use_top = can_be_top;
/* lock_len: length of region that should remain locked */
if (use_top)
lock_len = mtd->size - (ofs + len);
else
lock_len = ofs;
/*
* Need largest pow such that:
*
* 1 / (2^pow) >= (len / size)
*
* so (assuming power-of-2 size) we do:
*
* pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
*/
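/*
 * Example: starting from a fully locked 8 MiB part, unlocking the bottom
 * 6 MiB (keeping top protection) leaves lock_len = 2 MiB, so
 * pow = ilog2(8 MiB) - order_base_2(2 MiB) = 2 and the BP field becomes
 * 0b101 -- only the upper 2 MiB remains protected.
 */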
pow = ilog2(mtd->size) - order_base_2(lock_len);
if (lock_len == 0) {
val = 0; /* fully unlocked */
} else {
val = mask - (pow << shift);
/* Some power-of-two sizes are not supported */
if (val & ~mask)
return -EINVAL;
}
status_new = (status_old & ~mask & ~SR_TB) | val;
/* Don't protect status register if we're fully unlocked */
if (lock_len == 0)
status_new &= ~SR_SRWD;
if (!use_top)
status_new |= SR_TB;
/* Don't bother if they're the same */
if (status_new == status_old)
return 0;
/* Only modify protection if it will not lock other areas */
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
write_enable(nor);
ret = write_sr(nor, status_new);
if (ret)
return ret;
return spi_nor_wait_till_ready(nor);
}
/*
* Check if a region of the flash is (completely) locked. See stm_lock() for
* more info.
*
* Returns 1 if entire region is locked, 0 if any portion is unlocked, and
* negative on errors.
*/
static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
int status;
status = read_sr(nor);
if (status < 0)
return status;
return stm_is_locked_sr(nor, ofs, len, status);
}
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
if (ret)
return ret;
ret = nor->flash_lock(nor, ofs, len);
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
return ret;
}
static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
if (ret)
return ret;
ret = nor->flash_unlock(nor, ofs, len);
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
return ret;
}
static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
if (ret)
return ret;
ret = nor->flash_is_locked(nor, ofs, len);
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
return ret;
}
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
(_jedec_id) & 0xff, \
((_ext_id) >> 8) & 0xff, \
(_ext_id) & 0xff, \
}, \
.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
.flags = (_flags),
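/*
 * For example, INFO(0xef6018, 0, 64 * 1024, 256, ...) yields
 * id = { 0xef, 0x60, 0x18 }, id_len = 3 and a 16 MiB (256 x 64 KiB)
 * device -- see the w25q128fw entry below.
 */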
#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
.id = { \
((_jedec_id) >> 16) & 0xff, \
((_jedec_id) >> 8) & 0xff, \
(_jedec_id) & 0xff, \
((_ext_id) >> 16) & 0xff, \
((_ext_id) >> 8) & 0xff, \
(_ext_id) & 0xff, \
}, \
.id_len = 6, \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = 256, \
.flags = (_flags),
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
.sector_size = (_sector_size), \
.n_sectors = (_n_sectors), \
.page_size = (_page_size), \
.addr_width = (_addr_width), \
.flags = (_flags),
/* NOTE: double check command sets and memory organization when you add
* more nor chips. This current list focuses on newer chips, which
* have been converging on command sets that include a JEDEC ID.
*
* All newly added entries should describe *hardware* and should use SECT_4K
* (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
* scenarios excluding small sectors there is a config option that can be
* disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
* For historical (and compatibility) reasons (before we got the above config)
* some old entries may be missing the 4K flag.
*/
static const struct flash_info spi_nor_ids[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
/* EON -- en25xxx */
{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
{ "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
/* ESMT */
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
/* Fujitsu */
{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
/* GigaDevice */
{
"gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
"gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
"gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron */
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
{ "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
{ "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{ "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
{ "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
/* ST Microelectronics -- newer production may have feature updates */
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
{ "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
{
"w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
{
"w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
"w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ },
};
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
int tmp;
u8 id[SPI_NOR_MAX_ID_LEN];
const struct flash_info *info;
tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
if (tmp < 0) {
dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
return ERR_PTR(tmp);
}
for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
info = &spi_nor_ids[tmp];
if (info->id_len) {
if (!memcmp(info->id, id, info->id_len))
return &spi_nor_ids[tmp];
}
}
dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
id[0], id[1], id[2]);
return ERR_PTR(-ENODEV);
}
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
if (ret)
return ret;
while (len) {
ret = nor->read(nor, from, len, buf);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
goto read_err;
}
if (ret < 0)
goto read_err;
WARN_ON(ret > len);
*retlen += ret;
buf += ret;
from += ret;
len -= ret;
}
ret = 0;
read_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
return ret;
}
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t actual;
int ret;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
if (ret)
return ret;
write_enable(nor);
nor->sst_write_second = false;
actual = to % 2;
/* Start write from odd address. */
if (actual) {
nor->program_opcode = SPINOR_OP_BP;
/* write one byte. */
ret = nor->write(nor, to, 1, buf);
if (ret < 0)
goto sst_write_err;
WARN(ret != 1, "While writing 1 byte written %i bytes\n",
(int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto sst_write_err;
}
to += actual;
/* Write out most of the data here. */
for (; actual < len - 1; actual += 2) {
nor->program_opcode = SPINOR_OP_AAI_WP;
/* write two bytes. */
ret = nor->write(nor, to, 2, buf + actual);
if (ret < 0)
goto sst_write_err;
WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
(int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto sst_write_err;
to += 2;
nor->sst_write_second = true;
}
nor->sst_write_second = false;
write_disable(nor);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto sst_write_err;
/* Write out trailing byte if it exists. */
if (actual != len) {
write_enable(nor);
nor->program_opcode = SPINOR_OP_BP;
ret = nor->write(nor, to, 1, buf + actual);
if (ret < 0)
goto sst_write_err;
WARN(ret != 1, "While writing 1 byte written %i bytes\n",
(int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto sst_write_err;
write_disable(nor);
actual += 1;
}
sst_write_err:
*retlen += actual;
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
}
/*
* Write an address range to the nor chip. Data must be written in
* FLASH_PAGESIZE chunks. The address range may be any size provided
* it is within the physical boundaries.
*/
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
if (ret)
return ret;
for (i = 0; i < len; ) {
ssize_t written;
page_offset = (to + i) & (nor->page_size - 1);
WARN_ONCE(page_offset,
"Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.",
page_offset);
/* the size of data remaining on the first page */
page_remain = min_t(size_t,
nor->page_size - page_offset, len - i);
write_enable(nor);
ret = nor->write(nor, to + i, page_remain, buf + i);
if (ret < 0)
goto write_err;
written = ret;
ret = spi_nor_wait_till_ready(nor);
if (ret)
goto write_err;
*retlen += written;
i += written;
if (written != page_remain) {
dev_err(nor->dev,
"While writing %zu bytes written %zd bytes\n",
page_remain, written);
ret = -EIO;
goto write_err;
}
}
write_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
}
static int macronix_quad_enable(struct spi_nor *nor)
{
int ret, val;
val = read_sr(nor);
if (val < 0)
return val;
write_enable(nor);
write_sr(nor, val | SR_QUAD_EN_MX);
if (spi_nor_wait_till_ready(nor))
return 1;
ret = read_sr(nor);
if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
dev_err(nor->dev, "Macronix Quad bit not set\n");
return -EINVAL;
}
return 0;
}
/*
* Write the status register and configuration register with 2 bytes.
* The first byte will be written to the status register, while the
* second byte will be written to the configuration register.
* Return negative if an error occurred.
*/
static int write_sr_cr(struct spi_nor *nor, u16 val)
{
nor->cmd_buf[0] = val & 0xff;
nor->cmd_buf[1] = (val >> 8);
return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2);
}
static int spansion_quad_enable(struct spi_nor *nor)
{
int ret;
int quad_en = CR_QUAD_EN_SPAN << 8;
write_enable(nor);
ret = write_sr_cr(nor, quad_en);
if (ret < 0) {
dev_err(nor->dev,
"error while writing configuration register\n");
return -EINVAL;
}
/* read back and check it */
ret = read_cr(nor);
if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
dev_err(nor->dev, "Spansion Quad bit not set\n");
return -EINVAL;
}
return 0;
}
static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
int status;
switch (JEDEC_MFR(info)) {
case SNOR_MFR_MACRONIX:
status = macronix_quad_enable(nor);
if (status) {
dev_err(nor->dev, "Macronix quad-read not enabled\n");
return -EINVAL;
}
return status;
case SNOR_MFR_MICRON:
return 0;
default:
status = spansion_quad_enable(nor);
if (status) {
dev_err(nor->dev, "Spansion quad-read not enabled\n");
return -EINVAL;
}
return status;
}
}
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev || !nor->read || !nor->write ||
!nor->read_reg || !nor->write_reg) {
pr_err("spi-nor: please fill all the necessary fields!\n");
return -EINVAL;
}
return 0;
}
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
const struct flash_info *info = NULL;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
struct device_node *np = spi_nor_get_flash_node(nor);
int ret;
int i;
ret = spi_nor_check(nor);
if (ret)
return ret;
if (name)
info = spi_nor_match_id(name);
/* Try to auto-detect if chip name wasn't specified or not found */
if (!info)
info = spi_nor_read_id(nor);
if (IS_ERR_OR_NULL(info))
return -ENOENT;
/*
* If the caller has specified the name of a flash model that can normally
* be detected using JEDEC, let's verify it.
*/
if (name && info->id_len) {
const struct flash_info *jinfo;
jinfo = spi_nor_read_id(nor);
if (IS_ERR(jinfo)) {
return PTR_ERR(jinfo);
} else if (jinfo != info) {
/*
* JEDEC knows better, so overwrite platform ID. We
* can't trust partitions any longer, but we'll let
* mtd apply them anyway, since some partitions may be
* marked read-only, and we don't want to lose that
* information, even if it's not 100% accurate.
*/
dev_warn(dev, "found %s, expected %s\n",
jinfo->name, info->name);
info = jinfo;
}
}
mutex_init(&nor->lock);
/*
* Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power up
* with the software protection bits set
*/
if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
JEDEC_MFR(info) == SNOR_MFR_INTEL ||
JEDEC_MFR(info) == SNOR_MFR_SST ||
info->flags & SPI_NOR_HAS_LOCK) {
write_enable(nor);
write_sr(nor, 0);
spi_nor_wait_till_ready(nor);
}
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
mtd->type = MTD_NORFLASH;
mtd->writesize = 1;
mtd->flags = MTD_CAP_NORFLASH;
mtd->size = info->sector_size * info->n_sectors;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
/* NOR protection support for STmicro/Micron chips and similar */
if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
info->flags & SPI_NOR_HAS_LOCK) {
nor->flash_lock = stm_lock;
nor->flash_unlock = stm_unlock;
nor->flash_is_locked = stm_is_locked;
}
if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
mtd->_lock = spi_nor_lock;
mtd->_unlock = spi_nor_unlock;
mtd->_is_locked = spi_nor_is_locked;
}
/* sst nor chips use AAI word program */
if (info->flags & SST_WRITE)
mtd->_write = sst_write;
else
mtd->_write = spi_nor_write;
if (info->flags & USE_FSR)
nor->flags |= SNOR_F_USE_FSR;
if (info->flags & SPI_NOR_HAS_TB)
nor->flags |= SNOR_F_HAS_SR_TB;
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
/* prefer "small sector" erase if possible */
if (info->flags & SECT_4K) {
nor->erase_opcode = SPINOR_OP_BE_4K;
mtd->erasesize = 4096;
} else if (info->flags & SECT_4K_PMC) {
nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
mtd->erasesize = 4096;
} else
#endif
{
nor->erase_opcode = SPINOR_OP_SE;
mtd->erasesize = info->sector_size;
}
if (info->flags & SPI_NOR_NO_ERASE)
mtd->flags |= MTD_NO_ERASE;
mtd->dev.parent = dev;
nor->page_size = info->page_size;
mtd->writebufsize = nor->page_size;
if (np) {
/* If we were instantiated by DT, use it */
if (of_property_read_bool(np, "m25p,fast-read"))
nor->flash_read = SPI_NOR_FAST;
else
nor->flash_read = SPI_NOR_NORMAL;
} else {
/* If we weren't instantiated by DT, default to fast-read */
nor->flash_read = SPI_NOR_FAST;
}
/* Some devices cannot do fast-read, no matter what DT tells us */
if (info->flags & SPI_NOR_NO_FR)
nor->flash_read = SPI_NOR_NORMAL;
/* Quad/Dual-read mode takes precedence over fast/normal */
if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
ret = set_quad_mode(nor, info);
if (ret) {
dev_err(dev, "quad mode not supported\n");
return ret;
}
nor->flash_read = SPI_NOR_QUAD;
} else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
nor->flash_read = SPI_NOR_DUAL;
}
/* Default commands */
switch (nor->flash_read) {
case SPI_NOR_QUAD:
nor->read_opcode = SPINOR_OP_READ_1_1_4;
break;
case SPI_NOR_DUAL:
nor->read_opcode = SPINOR_OP_READ_1_1_2;
break;
case SPI_NOR_FAST:
nor->read_opcode = SPINOR_OP_READ_FAST;
break;
case SPI_NOR_NORMAL:
nor->read_opcode = SPINOR_OP_READ;
break;
default:
dev_err(dev, "No Read opcode defined\n");
return -EINVAL;
}
nor->program_opcode = SPINOR_OP_PP;
if (info->addr_width)
nor->addr_width = info->addr_width;
else if (mtd->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
/* Dedicated 4-byte command set */
switch (nor->flash_read) {
case SPI_NOR_QUAD:
nor->read_opcode = SPINOR_OP_READ4_1_1_4;
break;
case SPI_NOR_DUAL:
nor->read_opcode = SPINOR_OP_READ4_1_1_2;
break;
case SPI_NOR_FAST:
nor->read_opcode = SPINOR_OP_READ4_FAST;
break;
case SPI_NOR_NORMAL:
nor->read_opcode = SPINOR_OP_READ4;
break;
}
nor->program_opcode = SPINOR_OP_PP_4B;
/* No small sector erase for 4-byte command set */
nor->erase_opcode = SPINOR_OP_SE_4B;
mtd->erasesize = info->sector_size;
} else
set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
}
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
dev_err(dev, "address width is too large: %u\n",
nor->addr_width);
return -EINVAL;
}
nor->read_dummy = spi_nor_read_dummy_cycles(nor);
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
dev_dbg(dev,
"mtd .name = %s, .size = 0x%llx (%lldMiB), "
".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
if (mtd->numeraseregions)
for (i = 0; i < mtd->numeraseregions; i++)
dev_dbg(dev,
"mtd.eraseregions[%d] = { .offset = 0x%llx, "
".erasesize = 0x%.8x (%uKiB), "
".numblocks = %d }\n",
i, (long long)mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].erasesize / 1024,
mtd->eraseregions[i].numblocks);
return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
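/*
 * Usage sketch (illustrative only, not part of this file): a SPI flash
 * controller driver fills in the hooks checked by spi_nor_check() and then
 * hands the device to spi_nor_scan(). All "my_*" names below are
 * hypothetical, and the hook prototypes are inferred from how this file
 * invokes them -- check <linux/mtd/spi-nor.h> in your tree for the exact
 * signatures.
 */
#if 0	/* example only */
struct my_controller {
	struct device *dev;
	struct spi_nor nor;
	/* ... controller-specific state ... */
};

static int my_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	struct my_controller *ctrl = nor->priv;

	/* issue 'opcode', then clock 'len' bytes back into 'buf' */
	return my_hw_xfer(ctrl, opcode, NULL, 0, buf, len);
}

static int my_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	struct my_controller *ctrl = nor->priv;

	/* issue 'opcode' followed by 'len' bytes taken from 'buf' */
	return my_hw_xfer(ctrl, opcode, buf, len, NULL, 0);
}

static int my_probe(struct my_controller *ctrl)
{
	struct spi_nor *nor = &ctrl->nor;
	int ret;

	nor->dev = ctrl->dev;
	nor->priv = ctrl;
	nor->read_reg = my_read_reg;
	nor->write_reg = my_write_reg;
	nor->read = my_read;	/* returns the number of bytes read */
	nor->write = my_write;	/* returns the number of bytes written */

	/* NULL name: rely purely on JEDEC ID detection */
	ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, NULL, 0);
}
#endif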
static const struct flash_info *spi_nor_match_id(const char *name)
{
const struct flash_info *id = spi_nor_ids;
while (id->name) {
if (!strcmp(name, id->name))
return id;
id++;
}
return NULL;
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");