@@ -211,9 +211,7 @@ static void reset_bank(struct denali_nand_info *denali)
 	irq_status = wait_for_irq(denali, irq_mask);
 
 	if (irq_status & operation_timeout[denali->flash_bank])
-	{
 		printk(KERN_ERR "reset bank failed.\n");
-	}
 }
 
 /* Reset the flash controller */
@@ -637,14 +635,12 @@ static void find_valid_banks(struct denali_nand_info *denali)
 		}
 	}
 
-	if (denali->platform == INTEL_CE4100)
-	{
+	if (denali->platform == INTEL_CE4100) {
 		/* Platform limitations of the CE4100 device limit
 		 * users to a single chip solution for NAND.
 		 * Multichip support is not enabled.
 		 */
-		if (denali->total_used_banks != 1)
-		{
+		if (denali->total_used_banks != 1) {
 			printk(KERN_ERR "Sorry, Intel CE4100 only supports "
 					"a single NAND device.\n");
 			BUG();
@@ -888,9 +884,7 @@ static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
 	 * with a specific ONFI mode, we apply those changes here.
 	 */
 	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
-	{
 		NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
-	}
 
 	return status;
 }
@@ -997,9 +991,7 @@ static void print_irq_log(struct denali_nand_info *denali)
 
 	printk("ISR debug log index = %X\n", denali->idx);
 	for (i = 0; i < 32; i++)
-	{
 		printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
-	}
 }
 #endif
 
@@ -1018,12 +1010,10 @@ static irqreturn_t denali_isr(int irq, void *dev_id)
 	/* check to see if a valid NAND chip has
 	 * been selected.
 	 */
-	if (is_flash_bank_valid(denali->flash_bank))
-	{
+	if (is_flash_bank_valid(denali->flash_bank)) {
 		/* check to see if controller generated
 		 * the interrupt, since this is a shared interrupt */
-		if ((irq_status = denali_irq_detected(denali)) != 0)
-		{
+		if ((irq_status = denali_irq_detected(denali)) != 0) {
 #if DEBUG_DENALI
 			denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
 			denali->idx %= 32;
@@ -1054,8 +1044,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 	bool retry = false;
 	unsigned long timeout = msecs_to_jiffies(1000);
 
-	do
-	{
+	do {
#if DEBUG_DENALI
 		printk("waiting for 0x%x\n", irq_mask);
 #endif
@@ -1068,8 +1057,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 		denali->idx %= 32;
 #endif
 
-		if (intr_status & irq_mask)
-		{
+		if (intr_status & irq_mask) {
 			denali->irq_status &= ~irq_mask;
 			spin_unlock_irq(&denali->irq_lock);
 #if DEBUG_DENALI
@@ -1077,9 +1065,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 #endif
 			/* our interrupt was detected */
 			break;
-		}
-		else
-		{
+		} else {
 			/* these are not the interrupts you are looking for -
 			 * need to wait again */
 			spin_unlock_irq(&denali->irq_lock);
@@ -1092,8 +1078,7 @@ static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 		}
 	} while (comp_res != 0);
 
-	if (comp_res == 0)
-	{
+	if (comp_res == 0) {
 		/* timeout */
 		printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
 				intr_status, irq_mask);
@@ -1149,22 +1134,17 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en
 
 	addr = BANK(denali->flash_bank) | denali->page;
 
-	if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
-	{
+	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
 		cmd = MODE_01 | addr;
 		denali_write32(cmd, denali->flash_mem);
-	}
-	else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
-	{
+	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
 		/* read spare area */
 		cmd = MODE_10 | addr;
 		index_addr(denali, (uint32_t)cmd, access_type);
 
 		cmd = MODE_01 | addr;
 		denali_write32(cmd, denali->flash_mem);
-	}
-	else if (op == DENALI_READ)
-	{
+	} else if (op == DENALI_READ) {
 		/* setup page read request for access type */
 		cmd = MODE_10 | addr;
 		index_addr(denali, (uint32_t)cmd, access_type);
@@ -1173,13 +1153,10 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en
 		   use the pipeline commands in Spare area only mode. So we
 		   don't.
 		 */
-		if (access_type == SPARE_ACCESS)
-		{
+		if (access_type == SPARE_ACCESS) {
 			cmd = MODE_01 | addr;
 			denali_write32(cmd, denali->flash_mem);
-		}
-		else
-		{
+		} else {
 			index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);
 
 			/* wait for command to be accepted
@@ -1187,14 +1164,11 @@ static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en
 			 * bank. */
 			irq_status = wait_for_irq(denali, irq_mask);
 
-			if (irq_status == 0)
-			{
+			if (irq_status == 0) {
 				printk(KERN_ERR "cmd, page, addr on timeout "
 					"(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
 				status = FAIL;
-			}
-			else
-			{
+			} else {
 				cmd = MODE_01 | addr;
 				denali_write32(cmd, denali->flash_mem);
 			}
@@ -1216,9 +1190,7 @@ static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_
 	/* write the data to the flash memory */
 	buf32 = (uint32_t *)buf;
 	for (i = 0; i < len / 4; i++)
-	{
 		denali_write32(*buf32++, denali->flash_mem + 0x10);
-	}
 	return i*4; /* intent is to return the number of bytes read */
 }
 
@@ -1241,9 +1213,7 @@ static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *bu
 	/* transfer the data from the flash */
 	buf32 = (uint32_t *)buf;
 	for (i = 0; i < len / 4; i++)
-	{
 		*buf32++ = ioread32(denali->flash_mem + 0x10);
-	}
 	return i*4; /* intent is to return the number of bytes read */
 }
 
@@ -1259,8 +1229,7 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 	denali->page = page;
 
 	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
-							DENALI_WRITE) == PASS)
-	{
+							DENALI_WRITE) == PASS) {
 		write_data_to_flash_mem(denali, buf, mtd->oobsize);
 
 #if DEBUG_DENALI
@@ -1274,14 +1243,11 @@ static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 		/* wait for operation to complete */
 		irq_status = wait_for_irq(denali, irq_mask);
 
-		if (irq_status == 0)
-		{
+		if (irq_status == 0) {
 			printk(KERN_ERR "OOB write failed\n");
 			status = -EIO;
 		}
-	}
-	else
-	{
+	} else {
 		printk(KERN_ERR "unable to send pipeline command\n");
 		status = -EIO;
 	}
@@ -1300,8 +1266,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 	printk("read_oob %d\n", page);
 #endif
 	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
-							DENALI_READ) == PASS)
-	{
+							DENALI_READ) == PASS) {
 		read_data_from_flash_mem(denali, buf, mtd->oobsize);
 
 		/* wait for command to be accepted
@@ -1310,9 +1275,7 @@ static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 		irq_status = wait_for_irq(denali, irq_mask);
 
 		if (irq_status == 0)
-		{
 			printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
-		}
 
 		/* We set the device back to MAIN_ACCESS here as I observed
 		 * instability with the controller if you do a block erase
@@ -1340,12 +1303,8 @@ bool is_erased(uint8_t *buf, int len)
 {
 	int i = 0;
 	for (i = 0; i < len; i++)
-	{
 		if (buf[i] != 0xFF)
-		{
 			return false;
-		}
-	}
 	return true;
 }
 #define ECC_SECTOR_SIZE 512
@@ -1362,15 +1321,13 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
 {
 	bool check_erased_page = false;
 
-	if (irq_status & INTR_STATUS0__ECC_ERR)
-	{
+	if (irq_status & INTR_STATUS0__ECC_ERR) {
 		/* read the ECC errors. we'll ignore them for now */
 		uint32_t err_address = 0, err_correction_info = 0;
 		uint32_t err_byte = 0, err_sector = 0, err_device = 0;
 		uint32_t err_correction_value = 0;
 
-		do
-		{
+		do {
 			err_address = ioread32(denali->flash_reg +
 						ECC_ERROR_ADDRESS);
 			err_sector = ECC_SECTOR(err_address);
@@ -1383,29 +1340,23 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
 				ECC_CORRECTION_VALUE(err_correction_info);
 			err_device = ECC_ERR_DEVICE(err_correction_info);
 
-			if (ECC_ERROR_CORRECTABLE(err_correction_info))
-			{
+			if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
 				/* offset in our buffer is computed as:
 				   sector number * sector size + offset in
 				   sector
 				 */
 				int offset = err_sector * ECC_SECTOR_SIZE +
 							err_byte;
-				if (offset < denali->mtd.writesize)
-				{
+				if (offset < denali->mtd.writesize) {
 					/* correct the ECC error */
 					buf[offset] ^= err_correction_value;
 					denali->mtd.ecc_stats.corrected++;
-				}
-				else
-				{
+				} else {
 					/* bummer, couldn't correct the error */
 					printk(KERN_ERR "ECC offset invalid\n");
 					denali->mtd.ecc_stats.failed++;
 				}
-			}
-			else
-			{
+			} else {
 				/* if the error is not correctable, need to
 				 * look at the page to see if it is an erased page.
 				 * if so, then it's not a real ECC error */
@@ -1482,8 +1433,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
 	/* copy buffer into DMA buffer */
 	memcpy(denali->buf.buf, buf, mtd->writesize);
 
-	if (raw_xfer)
-	{
+	if (raw_xfer) {
 		/* transfer the data to the spare area */
 		memcpy(denali->buf.buf + mtd->writesize,
 			chip->oob_poi,
@@ -1500,8 +1450,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
 	/* wait for operation to complete */
 	irq_status = wait_for_irq(denali, irq_mask);
 
-	if (irq_status == 0)
-	{
+	if (irq_status == 0) {
 		printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
 		denali->status =
 		   (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
@@ -1584,21 +1533,15 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 	check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
 	denali_enable_dma(denali, false);
 
-	if (check_erased_page)
-	{
+	if (check_erased_page) {
 		read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
 
 		/* check ECC failures that may have occurred on erased pages */
-		if (check_erased_page)
-		{
+		if (check_erased_page) {
 			if (!is_erased(buf, denali->mtd.writesize))
-			{
 				denali->mtd.ecc_stats.failed++;
-			}
 			if (!is_erased(buf, denali->mtd.oobsize))
-			{
 				denali->mtd.ecc_stats.failed++;
-			}
 		}
 	}
 	return 0;
@@ -1643,9 +1586,7 @@ static uint8_t denali_read_byte(struct mtd_info *mtd)
 	uint8_t result = 0xff;
 
 	if (denali->buf.head < denali->buf.tail)
-	{
 		result = denali->buf.buf[denali->buf.head++];
-	}
 
 #if DEBUG_DENALI
 	printk("read byte -> 0x%02x\n", result);
@@ -1708,8 +1649,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
 #if DEBUG_DENALI
 	printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
 #endif
-	switch (cmd)
-	{
+	switch (cmd) {
 	case NAND_CMD_PAGEPROG:
 		break;
 	case NAND_CMD_STATUS:
@@ -1717,8 +1657,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
 		break;
 	case NAND_CMD_READID:
 		reset_buf(denali);
-		if (denali->flash_bank < denali->total_used_banks)
-		{
+		if (denali->flash_bank < denali->total_used_banks) {
 			/* write manufacturer information into nand
 			   buffer for NAND subsystem to fetch.
 			 */
@@ -1727,9 +1666,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
 			write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
 			write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
 			write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
-		}
-		else
-		{
+		} else {
 			int i;
 			for (i = 0; i < 5; i++)
 				write_byte_to_buf(denali, 0xff);
@@ -1796,20 +1733,24 @@ static void denali_hw_init(struct denali_nand_info *denali)
 static struct nand_ecclayout nand_oob_slc = {
 	.eccbytes = 4,
 	.eccpos = { 0, 1, 2, 3 }, /* not used */
-	.oobfree = {{
+	.oobfree = {
+		{
 			.offset = ECC_BYTES_SLC,
 			.length = 64 - ECC_BYTES_SLC
-			}}
+		}
+	}
 };
 
 #define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE)
 static struct nand_ecclayout nand_oob_mlc_14bit = {
 	.eccbytes = 14,
 	.eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
-	.oobfree = {{
+	.oobfree = {
+		{
 			.offset = ECC_BYTES_MLC,
 			.length = 64 - ECC_BYTES_MLC
-			}}
+		}
+	}
 };
 
 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
@@ -1882,8 +1823,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* Due to a silicon limitation, we can only support
 	 * ONFI timing mode 1 and below.
 	 */
-	if (onfi_timing_mode < -1 || onfi_timing_mode > 1)
-	{
+	if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
 		printk("Intel CE4100 only supports ONFI timing mode 1 "
 			"or below\n");
 		ret = -EINVAL;
@@ -1912,16 +1852,14 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* Is 32-bit DMA supported? */
 	ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
 
-	if (ret)
-	{
+	if (ret) {
 		printk(KERN_ERR "Spectra: no usable DMA configuration\n");
 		goto failed_enable;
 	}
 	denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
 					PCI_DMA_BIDIRECTIONAL);
 
-	if (pci_dma_mapping_error(dev, denali->buf.dma_buf))
-	{
+	if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
 		printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
 		goto failed_enable;
 	}
@@ -1978,8 +1916,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* MTD supported page sizes vary by kernel. We validate our
 	 * kernel supports the device here.
 	 */
-	if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
-	{
+	if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
 		ret = -ENODEV;
 		printk(KERN_ERR "Spectra: device size not supported by this "
 			"version of MTD.");
@@ -2011,8 +1948,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* scan for NAND devices attached to the controller
 	 * this is the first stage in a two step process to register
 	 * with the nand subsystem */
-	if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL))
-	{
+	if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
 		ret = -ENXIO;
 		goto failed_nand;
 	}
@@ -2029,13 +1965,10 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
 	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
 
-	if (denali->dev_info.MLCDevice)
-	{
+	if (denali->dev_info.MLCDevice) {
 		denali->nand.ecc.layout = &nand_oob_mlc_14bit;
 		denali->nand.ecc.bytes = ECC_BYTES_MLC;
-	}
-	else /* SLC */
-	{
+	} else {/* SLC */
 		denali->nand.ecc.layout = &nand_oob_slc;
 		denali->nand.ecc.bytes = ECC_BYTES_SLC;
 	}
@@ -2057,8 +1990,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	denali->nand.ecc.write_oob = denali_write_oob;
 	denali->nand.erase_cmd = denali_erase;
 
-	if (nand_scan_tail(&denali->mtd))
-	{
+	if (nand_scan_tail(&denali->mtd)) {
 		ret = -ENXIO;
 		goto failed_nand;
 	}