SPI NOR fixes:

- Various fixes related to the SFDP parsing code merged in 4.20
 - Fix for a page fault in the cadence-qspi
 
 NAND fixes:
 - Fix a macro name conflict between the QCOM NAND controller driver
   and the RISC-V asm headers
 - Fix of-node handling in the atmel driver
 -----BEGIN PGP SIGNATURE-----
 
 iQJQBAABCgA6FiEEKmCqpbOU668PNA69Ze02AX4ItwAFAlv2Y7QcHGJvcmlzLmJy
 ZXppbGxvbkBib290bGluLmNvbQAKCRBl7TYBfgi3AOOoD/9FpLnRt20eUe4AFeio
 98Jxm5Hr8Eh0VEMEOeV2/06zJrkg1qpf0hKsdiQoX6pdWBfrce01oKpaK3lzSFso
 t9bWKrqDR1rGFsoN7BBlSHVSsHvCr6yWF9rFJImKB5gZOxO8CosHY2J2m1KKUC+v
 FRA4rZOCasOd6IQGlG+KUJyjKSOquSJyFHOV3K1HqhItExgUdwTOKJsuB/CZC0rd
 7ges2HfvRAJy8lE5gOnoJA3S3Z7UVCvOZl7+Gh+blRye17nqzYXO+9yPOHLp6ZVA
 nLdQZX7Z+5ZqkKnXfzSKkVSQosIGpHmvhr9rxzrOL2aXf96Hb+s1cAuXf1toYyGY
 bj2DtZ2FXs+0bHWBNmBayeheY59FuPRF9mH3AnV1tRCpP6XtnIBtqVQao1Pf30sR
 7N1bsHOOtfyITuaeR9jTRaPzVn70zCrhvWPdZvu0Ui6RRBACJi/mB6FzY+3dwsRy
 rMR823WUwxjDqfMfEp/KDP9lRHHDW+Hfcq+ZnidcqbFNIYyg8YYY9K2OFjYWXjNf
 S9XgBVDo4DlKR+eXyYYxjvpQwP6tnVfMrsnBU4dIIn74BTFIRX6gifjtiCd2eX39
 v6fLEtzaSR7nK00Q2V5rpKah5yjy21sV6TGIA/MIKfEfm60AeN8u9j1+melqKMn+
 QnbNZ6rTVzloUROD8OHAjcwUbQ==
 =9R0Y
 -----END PGP SIGNATURE-----

Merge tag 'mtd/fixes-for-4.20-rc4' of git://git.infradead.org/linux-mtd

Pull mtd fixes from Boris Brezillon:
 "SPI NOR fixes:

   - Various fixes related to the SFDP parsing code merged in 4.20

   - Fix for a page fault in the cadence-qspi

  NAND fixes:

   - Fix a macro name conflict between the QCOM NAND controller driver
     and the RISC-V asm headers

   - Fix of-node handling in the atmel driver"

* tag 'mtd/fixes-for-4.20-rc4' of git://git.infradead.org/linux-mtd:
  mtd: spi-nor: fix selection of uniform erase type in flexible conf
  mtd: spi-nor: Fix Cadence QSPI page fault kernel panic
  mtd: rawnand: qcom: Namespace prefix some commands
  mtd: rawnand: atmel: fix OF child-node lookup
  mtd: spi_nor: pass DMA-able buffer to spi_nor_read_raw()
  mtd: spi-nor: don't overwrite errno in spi_nor_get_map_in_use()
  mtd: spi-nor: fix iteration over smpt array
  mtd: spi-nor: don't drop sfdp data if optional parsers fail
This commit is contained in:
Linus Torvalds 2018-11-22 08:35:30 -08:00
commit ef4d6f2c0c
4 changed files with 137 additions and 55 deletions

View File

@@ -2032,8 +2032,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
int ret; int ret;
nand_np = dev->of_node; nand_np = dev->of_node;
nfc_np = of_find_compatible_node(dev->of_node, NULL, nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
"atmel,sama5d3-nfc");
if (!nfc_np) { if (!nfc_np) {
dev_err(dev, "Could not find device node for sama5d3-nfc\n"); dev_err(dev, "Could not find device node for sama5d3-nfc\n");
return -ENODEV; return -ENODEV;
@@ -2447,15 +2446,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
} }
if (caps->legacy_of_bindings) { if (caps->legacy_of_bindings) {
struct device_node *nfc_node;
u32 ale_offs = 21; u32 ale_offs = 21;
/* /*
* If we are parsing legacy DT props and the DT contains a * If we are parsing legacy DT props and the DT contains a
* valid NFC node, forward the request to the sama5 logic. * valid NFC node, forward the request to the sama5 logic.
*/ */
if (of_find_compatible_node(pdev->dev.of_node, NULL, nfc_node = of_get_compatible_child(pdev->dev.of_node,
"atmel,sama5d3-nfc")) "atmel,sama5d3-nfc");
if (nfc_node) {
caps = &atmel_sama5_nand_caps; caps = &atmel_sama5_nand_caps;
of_node_put(nfc_node);
}
/* /*
* Even if the compatible says we are dealing with an * Even if the compatible says we are dealing with an

View File

@@ -150,15 +150,15 @@
#define NAND_VERSION_MINOR_SHIFT 16 #define NAND_VERSION_MINOR_SHIFT 16
/* NAND OP_CMDs */ /* NAND OP_CMDs */
#define PAGE_READ 0x2 #define OP_PAGE_READ 0x2
#define PAGE_READ_WITH_ECC 0x3 #define OP_PAGE_READ_WITH_ECC 0x3
#define PAGE_READ_WITH_ECC_SPARE 0x4 #define OP_PAGE_READ_WITH_ECC_SPARE 0x4
#define PROGRAM_PAGE 0x6 #define OP_PROGRAM_PAGE 0x6
#define PAGE_PROGRAM_WITH_ECC 0x7 #define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define PROGRAM_PAGE_SPARE 0x9 #define OP_PROGRAM_PAGE_SPARE 0x9
#define BLOCK_ERASE 0xa #define OP_BLOCK_ERASE 0xa
#define FETCH_ID 0xb #define OP_FETCH_ID 0xb
#define RESET_DEVICE 0xd #define OP_RESET_DEVICE 0xd
/* Default Value for NAND_DEV_CMD_VLD */ /* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
@@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
if (read) { if (read) {
if (host->use_ecc) if (host->use_ecc)
cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
else else
cmd = PAGE_READ | PAGE_ACC | LAST_PAGE; cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
} else { } else {
cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
} }
if (host->use_ecc) { if (host->use_ecc) {
@@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host)
* in use. we configure the controller to perform a raw read of 512 * in use. we configure the controller to perform a raw read of 512
* bytes to read onfi params * bytes to read onfi params
*/ */
nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE); nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
nandc_set_reg(nandc, NAND_ADDR0, 0); nandc_set_reg(nandc, NAND_ADDR0, 0);
nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
@@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
nandc_set_reg(nandc, NAND_FLASH_CMD, nandc_set_reg(nandc, NAND_FLASH_CMD,
BLOCK_ERASE | PAGE_ACC | LAST_PAGE); OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
nandc_set_reg(nandc, NAND_ADDR0, page_addr); nandc_set_reg(nandc, NAND_ADDR0, page_addr);
nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_DEV0_CFG0, nandc_set_reg(nandc, NAND_DEV0_CFG0,
@@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column)
if (column == -1) if (column == -1)
return 0; return 0;
nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
nandc_set_reg(nandc, NAND_ADDR0, column); nandc_set_reg(nandc, NAND_ADDR0, column);
nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_ADDR1, 0);
nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
@@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host)
struct nand_chip *chip = &host->chip; struct nand_chip *chip = &host->chip;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
nandc_set_reg(nandc, NAND_EXEC_CMD, 1); nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);

View File

@@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
ndelay(cqspi->wr_delay); ndelay(cqspi->wr_delay);
while (remaining > 0) { while (remaining > 0) {
size_t write_words, mod_bytes;
write_bytes = remaining > page_size ? page_size : remaining; write_bytes = remaining > page_size ? page_size : remaining;
iowrite32_rep(cqspi->ahb_base, txbuf, write_words = write_bytes / 4;
DIV_ROUND_UP(write_bytes, 4)); mod_bytes = write_bytes % 4;
/* Write 4 bytes at a time then single bytes. */
if (write_words) {
iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
txbuf += (write_words * 4);
}
if (mod_bytes) {
unsigned int temp = 0xFFFFFFFF;
memcpy(&temp, txbuf, mod_bytes);
iowrite32(temp, cqspi->ahb_base);
txbuf += mod_bytes;
}
if (!wait_for_completion_timeout(&cqspi->transfer_complete, if (!wait_for_completion_timeout(&cqspi->transfer_complete,
msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
@@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
goto failwr; goto failwr;
} }
txbuf += write_bytes;
remaining -= write_bytes; remaining -= write_bytes;
if (remaining > 0) if (remaining > 0)

View File

@@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
* @nor: pointer to a 'struct spi_nor' * @nor: pointer to a 'struct spi_nor'
* @addr: offset in the serial flash memory * @addr: offset in the serial flash memory
* @len: number of bytes to read * @len: number of bytes to read
* @buf: buffer where the data is copied into * @buf: buffer where the data is copied into (dma-safe memory)
* *
* Return: 0 on success, -errno otherwise. * Return: 0 on success, -errno otherwise.
*/ */
@@ -2521,6 +2521,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
return left->size - right->size; return left->size - right->size;
} }
/**
* spi_nor_sort_erase_mask() - sort erase mask
* @map: the erase map of the SPI NOR
* @erase_mask: the erase type mask to be sorted
*
* Replicate the sort done for the map's erase types in BFPT: sort the erase
* mask in ascending order with the smallest erase type size starting from
* BIT(0) in the sorted erase mask.
*
* Return: sorted erase mask.
*/
static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
{
struct spi_nor_erase_type *erase_type = map->erase_type;
int i;
u8 sorted_erase_mask = 0;
if (!erase_mask)
return 0;
/* Replicate the sort done for the map's erase types. */
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
sorted_erase_mask |= BIT(i);
return sorted_erase_mask;
}
/** /**
* spi_nor_regions_sort_erase_types() - sort erase types in each region * spi_nor_regions_sort_erase_types() - sort erase types in each region
* @map: the erase map of the SPI NOR * @map: the erase map of the SPI NOR
@@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map) static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{ {
struct spi_nor_erase_region *region = map->regions; struct spi_nor_erase_region *region = map->regions;
struct spi_nor_erase_type *erase_type = map->erase_type;
int i;
u8 region_erase_mask, sorted_erase_mask; u8 region_erase_mask, sorted_erase_mask;
while (region) { while (region) {
region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
/* Replicate the sort done for the map's erase types. */ sorted_erase_mask = spi_nor_sort_erase_mask(map,
sorted_erase_mask = 0; region_erase_mask);
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (erase_type[i].size &&
region_erase_mask & BIT(erase_type[i].idx))
sorted_erase_mask |= BIT(i);
/* Overwrite erase mask. */ /* Overwrite erase mask. */
region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) | region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
@@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
* spi_nor_get_map_in_use() - get the configuration map in use * spi_nor_get_map_in_use() - get the configuration map in use
* @nor: pointer to a 'struct spi_nor' * @nor: pointer to a 'struct spi_nor'
* @smpt: pointer to the sector map parameter table * @smpt: pointer to the sector map parameter table
* @smpt_len: sector map parameter table length
*
* Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
*/ */
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt) static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
u8 smpt_len)
{ {
const u32 *ret = NULL; const u32 *ret;
u32 i, addr; u8 *buf;
u32 addr;
int err; int err;
u8 i;
u8 addr_width, read_opcode, read_dummy; u8 addr_width, read_opcode, read_dummy;
u8 read_data_mask, data_byte, map_id; u8 read_data_mask, map_id;
/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
addr_width = nor->addr_width; addr_width = nor->addr_width;
read_dummy = nor->read_dummy; read_dummy = nor->read_dummy;
read_opcode = nor->read_opcode; read_opcode = nor->read_opcode;
map_id = 0; map_id = 0;
i = 0;
/* Determine if there are any optional Detection Command Descriptors */ /* Determine if there are any optional Detection Command Descriptors */
while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) { for (i = 0; i < smpt_len; i += 2) {
if (smpt[i] & SMPT_DESC_TYPE_MAP)
break;
read_data_mask = SMPT_CMD_READ_DATA(smpt[i]); read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]); nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]); nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]); nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
addr = smpt[i + 1]; addr = smpt[i + 1];
err = spi_nor_read_raw(nor, addr, 1, &data_byte); err = spi_nor_read_raw(nor, addr, 1, buf);
if (err) if (err) {
ret = ERR_PTR(err);
goto out; goto out;
}
/* /*
* Build an index value that is used to select the Sector Map * Build an index value that is used to select the Sector Map
* Configuration that is currently in use. * Configuration that is currently in use.
*/ */
map_id = map_id << 1 | !!(data_byte & read_data_mask); map_id = map_id << 1 | !!(*buf & read_data_mask);
i = i + 2;
} }
/* Find the matching configuration map */ /*
while (SMPT_MAP_ID(smpt[i]) != map_id) { * If command descriptors are provided, they always precede map
* descriptors in the table. There is no need to start the iteration
* over smpt array all over again.
*
* Find the matching configuration map.
*/
ret = ERR_PTR(-EINVAL);
while (i < smpt_len) {
if (SMPT_MAP_ID(smpt[i]) == map_id) {
ret = smpt + i;
break;
}
/*
* If there are no more configuration map descriptors and no
* configuration ID matched the configuration identifier, the
* sector address map is unknown.
*/
if (smpt[i] & SMPT_DESC_END) if (smpt[i] & SMPT_DESC_END)
goto out; break;
/* increment the table index to the next map */ /* increment the table index to the next map */
i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1; i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
} }
ret = smpt + i;
/* fall through */ /* fall through */
out: out:
kfree(buf);
nor->addr_width = addr_width; nor->addr_width = addr_width;
nor->read_dummy = read_dummy; nor->read_dummy = read_dummy;
nor->read_opcode = read_opcode; nor->read_opcode = read_opcode;
@@ -2946,7 +3000,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
u64 offset; u64 offset;
u32 region_count; u32 region_count;
int i, j; int i, j;
u8 erase_type; u8 erase_type, uniform_erase_type;
region_count = SMPT_MAP_REGION_COUNT(*smpt); region_count = SMPT_MAP_REGION_COUNT(*smpt);
/* /*
@@ -2959,7 +3013,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
return -ENOMEM; return -ENOMEM;
map->regions = region; map->regions = region;
map->uniform_erase_type = 0xff; uniform_erase_type = 0xff;
offset = 0; offset = 0;
/* Populate regions. */ /* Populate regions. */
for (i = 0; i < region_count; i++) { for (i = 0; i < region_count; i++) {
@@ -2974,12 +3028,15 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
* Save the erase types that are supported in all regions and * Save the erase types that are supported in all regions and
* can erase the entire flash memory. * can erase the entire flash memory.
*/ */
map->uniform_erase_type &= erase_type; uniform_erase_type &= erase_type;
offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) + offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
region[i].size; region[i].size;
} }
map->uniform_erase_type = spi_nor_sort_erase_mask(map,
uniform_erase_type);
spi_nor_region_mark_end(&region[i - 1]); spi_nor_region_mark_end(&region[i - 1]);
return 0; return 0;
@@ -3020,9 +3077,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
for (i = 0; i < smpt_header->length; i++) for (i = 0; i < smpt_header->length; i++)
smpt[i] = le32_to_cpu(smpt[i]); smpt[i] = le32_to_cpu(smpt[i]);
sector_map = spi_nor_get_map_in_use(nor, smpt); sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
if (!sector_map) { if (IS_ERR(sector_map)) {
ret = -EINVAL; ret = PTR_ERR(sector_map);
goto out; goto out;
} }
@@ -3125,7 +3182,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
if (err) if (err)
goto exit; goto exit;
/* Parse other parameter headers. */ /* Parse optional parameter tables. */
for (i = 0; i < header.nph; i++) { for (i = 0; i < header.nph; i++) {
param_header = &param_headers[i]; param_header = &param_headers[i];
@@ -3138,8 +3195,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
break; break;
} }
if (err) if (err) {
goto exit; dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
SFDP_PARAM_HEADER_ID(param_header));
/*
* Let's not drop all information we extracted so far
* if optional table parsers fail. In case of failing,
* each optional parser is responsible to roll back to
* the previously known spi_nor data.
*/
err = 0;
}
} }
exit: exit: