blob: ca2b6b8850ba862ad0e46c4784ef973d84c4d31a [file] [log] [blame]
Jason Robertsce082592010-05-13 15:57:33 +01001/*
2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
Jason Robertsce082592010-05-13 15:57:33 +010019#include <linux/interrupt.h>
20#include <linux/delay.h>
Jamie Iles84457942011-05-06 15:28:55 +010021#include <linux/dma-mapping.h>
Jason Robertsce082592010-05-13 15:57:33 +010022#include <linux/wait.h>
23#include <linux/mutex.h>
Jason Robertsce082592010-05-13 15:57:33 +010024#include <linux/mtd/mtd.h>
25#include <linux/module.h>
26
27#include "denali.h"
28
29MODULE_LICENSE("GPL");
30
#define DENALI_NAND_NAME    "denali-nand"

/*
 * Single mask combining every interrupt source this driver services;
 * used when enabling and acknowledging controller interrupts.
 */
#define DENALI_IRQ_ALL	(INTR__DMA_CMD_COMP | \
			INTR__ECC_TRANSACTION_DONE | \
			INTR__ECC_ERR | \
			INTR__PROGRAM_FAIL | \
			INTR__LOAD_COMP | \
			INTR__PROGRAM_COMP | \
			INTR__TIME_OUT | \
			INTR__ERASE_FAIL | \
			INTR__RST_COMP | \
			INTR__ERASE_COMP)

/*
 * sentinel stored in denali->flash_bank while no chip select is active
 */
#define CHIP_SELECT_INVALID	-1

/*
 * The bus interface clock, clk_x, is phase aligned with the core clock.  The
 * clk_x is an integral multiple N of the core clk.  The value N is configured
 * at IP delivery time, and its available value is 4, 5, or 6.  We need to
 * align to the largest value to make it work with any possible configuration.
 */
#define DENALI_CLK_X_MULT	6
Jason Robertsce082592010-05-13 15:57:33 +010061
Masahiro Yamada43914a22014-09-09 11:01:51 +090062/*
63 * this macro allows us to convert from an MTD structure to our own
Jason Robertsce082592010-05-13 15:57:33 +010064 * device context (denali) structure.
65 */
Boris BREZILLON442f201b2015-12-11 15:06:00 +010066static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
67{
68 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
69}
Jason Robertsce082592010-05-13 15:57:33 +010070
/*
 * Access-type codes written to the controller to select which portion of
 * the page a transfer touches.
 */
#define SPARE_ACCESS		0x41
#define MAIN_ACCESS		0x42
#define MAIN_SPARE_ACCESS	0x43

/* direction of a pipeline command */
#define DENALI_READ	0
#define DENALI_WRITE	0x100

/*
 * helper that positions the bank number into the proper bits of a
 * controller command word
 */
#define BANK(x) ((x) << 24)
Jason Robertsce082592010-05-13 15:57:33 +010088/* forward declarations */
89static void clear_interrupts(struct denali_nand_info *denali);
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +080090static uint32_t wait_for_irq(struct denali_nand_info *denali,
91 uint32_t irq_mask);
92static void denali_irq_enable(struct denali_nand_info *denali,
93 uint32_t int_mask);
Jason Robertsce082592010-05-13 15:57:33 +010094static uint32_t read_interrupt_status(struct denali_nand_info *denali);
95
Masahiro Yamada43914a22014-09-09 11:01:51 +090096/*
97 * Certain operations for the denali NAND controller use an indexed mode to
98 * read/write data. The operation is performed by writing the address value
99 * of the command to the device memory followed by the data. This function
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800100 * abstracts this common operation.
Masahiro Yamada43914a22014-09-09 11:01:51 +0900101 */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800102static void index_addr(struct denali_nand_info *denali,
103 uint32_t address, uint32_t data)
Jason Robertsce082592010-05-13 15:57:33 +0100104{
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800105 iowrite32(address, denali->flash_mem);
106 iowrite32(data, denali->flash_mem + 0x10);
Jason Robertsce082592010-05-13 15:57:33 +0100107}
108
109/* Perform an indexed read of the device */
110static void index_addr_read_data(struct denali_nand_info *denali,
111 uint32_t address, uint32_t *pdata)
112{
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800113 iowrite32(address, denali->flash_mem);
Jason Robertsce082592010-05-13 15:57:33 +0100114 *pdata = ioread32(denali->flash_mem + 0x10);
115}
116
Masahiro Yamada43914a22014-09-09 11:01:51 +0900117/*
118 * We need to buffer some data for some of the NAND core routines.
119 * The operations manage buffering that data.
120 */
Jason Robertsce082592010-05-13 15:57:33 +0100121static void reset_buf(struct denali_nand_info *denali)
122{
123 denali->buf.head = denali->buf.tail = 0;
124}
125
126static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
127{
Jason Robertsce082592010-05-13 15:57:33 +0100128 denali->buf.buf[denali->buf.tail++] = byte;
129}
130
131/* reads the status of the device */
132static void read_status(struct denali_nand_info *denali)
133{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900134 uint32_t cmd;
Jason Robertsce082592010-05-13 15:57:33 +0100135
136 /* initialize the data buffer to store status */
137 reset_buf(denali);
138
Chuanxiao Dongf0bc0c72010-08-11 17:14:59 +0800139 cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
140 if (cmd)
141 write_byte_to_buf(denali, NAND_STATUS_WP);
142 else
143 write_byte_to_buf(denali, 0);
Jason Robertsce082592010-05-13 15:57:33 +0100144}
145
146/* resets a specific device connected to the core */
147static void reset_bank(struct denali_nand_info *denali)
148{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900149 uint32_t irq_status;
Masahiro Yamada1aded582017-03-23 05:07:06 +0900150 uint32_t irq_mask = INTR__RST_COMP | INTR__TIME_OUT;
Jason Robertsce082592010-05-13 15:57:33 +0100151
152 clear_interrupts(denali);
153
Jamie Iles9589bf52011-05-06 15:28:56 +0100154 iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
Jason Robertsce082592010-05-13 15:57:33 +0100155
156 irq_status = wait_for_irq(denali, irq_mask);
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800157
Masahiro Yamada1aded582017-03-23 05:07:06 +0900158 if (irq_status & INTR__TIME_OUT)
Jamie Iles84457942011-05-06 15:28:55 +0100159 dev_err(denali->dev, "reset bank failed.\n");
Jason Robertsce082592010-05-13 15:57:33 +0100160}
161
162/* Reset the flash controller */
Chuanxiao Dongeda936e2010-07-27 14:17:37 +0800163static uint16_t denali_nand_reset(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +0100164{
Masahiro Yamada93e3c8a2014-09-09 11:01:54 +0900165 int i;
Jason Robertsce082592010-05-13 15:57:33 +0100166
Masahiro Yamada81254502014-09-16 20:04:25 +0900167 for (i = 0; i < denali->max_banks; i++)
Masahiro Yamada1aded582017-03-23 05:07:06 +0900168 iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
Jamie Iles9589bf52011-05-06 15:28:56 +0100169 denali->flash_reg + INTR_STATUS(i));
Jason Robertsce082592010-05-13 15:57:33 +0100170
Masahiro Yamada81254502014-09-16 20:04:25 +0900171 for (i = 0; i < denali->max_banks; i++) {
Jamie Iles9589bf52011-05-06 15:28:56 +0100172 iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
Masahiro Yamada81254502014-09-16 20:04:25 +0900173 while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
Masahiro Yamada1aded582017-03-23 05:07:06 +0900174 (INTR__RST_COMP | INTR__TIME_OUT)))
Chuanxiao Dong628bfd412010-08-11 17:53:29 +0800175 cpu_relax();
Jamie Iles9589bf52011-05-06 15:28:56 +0100176 if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
Masahiro Yamada1aded582017-03-23 05:07:06 +0900177 INTR__TIME_OUT)
Jamie Iles84457942011-05-06 15:28:55 +0100178 dev_dbg(denali->dev,
Jason Robertsce082592010-05-13 15:57:33 +0100179 "NAND Reset operation timed out on bank %d\n", i);
180 }
181
Jamie Ilesc89eeda2011-05-06 15:28:57 +0100182 for (i = 0; i < denali->max_banks; i++)
Masahiro Yamada1aded582017-03-23 05:07:06 +0900183 iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
Masahiro Yamada81254502014-09-16 20:04:25 +0900184 denali->flash_reg + INTR_STATUS(i));
Jason Robertsce082592010-05-13 15:57:33 +0100185
186 return PASS;
187}
188
Masahiro Yamada43914a22014-09-09 11:01:51 +0900189/*
Jamie Ilesc89eeda2011-05-06 15:28:57 +0100190 * Use the configuration feature register to determine the maximum number of
191 * banks that the hardware supports.
192 */
193static void detect_max_banks(struct denali_nand_info *denali)
194{
195 uint32_t features = ioread32(denali->flash_reg + FEATURES);
196
Masahiro Yamadae7beeee2017-03-30 15:45:57 +0900197 denali->max_banks = 1 << (features & FEATURES__N_BANKS);
198
199 /* the encoding changed from rev 5.0 to 5.1 */
200 if (denali->revision < 0x0501)
201 denali->max_banks <<= 1;
Jamie Ilesc89eeda2011-05-06 15:28:57 +0100202}
203
Chuanxiao Dongeda936e2010-07-27 14:17:37 +0800204static void denali_set_intr_modes(struct denali_nand_info *denali,
Jason Robertsce082592010-05-13 15:57:33 +0100205 uint16_t INT_ENABLE)
206{
Jason Robertsce082592010-05-13 15:57:33 +0100207 if (INT_ENABLE)
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800208 iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
Jason Robertsce082592010-05-13 15:57:33 +0100209 else
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800210 iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
Jason Robertsce082592010-05-13 15:57:33 +0100211}
212
/*
 * validation helper to verify that the controlling software is making a
 * valid request: only chip selects 0..3 exist.
 * NOTE(review): compares against a fixed 4 rather than denali->max_banks —
 * confirm this matches the detected hardware configuration.
 */
static inline bool is_flash_bank_valid(int flash_bank)
{
	return flash_bank >= 0 && flash_bank < 4;
}
221
222static void denali_irq_init(struct denali_nand_info *denali)
223{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900224 uint32_t int_mask;
Jamie Iles9589bf52011-05-06 15:28:56 +0100225 int i;
Jason Robertsce082592010-05-13 15:57:33 +0100226
227 /* Disable global interrupts */
Chuanxiao Dongeda936e2010-07-27 14:17:37 +0800228 denali_set_intr_modes(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +0100229
230 int_mask = DENALI_IRQ_ALL;
231
232 /* Clear all status bits */
Jamie Ilesc89eeda2011-05-06 15:28:57 +0100233 for (i = 0; i < denali->max_banks; ++i)
Jamie Iles9589bf52011-05-06 15:28:56 +0100234 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
Jason Robertsce082592010-05-13 15:57:33 +0100235
236 denali_irq_enable(denali, int_mask);
237}
238
239static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
240{
Chuanxiao Dongeda936e2010-07-27 14:17:37 +0800241 denali_set_intr_modes(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +0100242}
243
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800244static void denali_irq_enable(struct denali_nand_info *denali,
245 uint32_t int_mask)
Jason Robertsce082592010-05-13 15:57:33 +0100246{
Jamie Iles9589bf52011-05-06 15:28:56 +0100247 int i;
248
Jamie Ilesc89eeda2011-05-06 15:28:57 +0100249 for (i = 0; i < denali->max_banks; ++i)
Jamie Iles9589bf52011-05-06 15:28:56 +0100250 iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
Jason Robertsce082592010-05-13 15:57:33 +0100251}
252
Masahiro Yamada43914a22014-09-09 11:01:51 +0900253/*
254 * This function only returns when an interrupt that this driver cares about
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800255 * occurs. This is to reduce the overhead of servicing interrupts
Jason Robertsce082592010-05-13 15:57:33 +0100256 */
257static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
258{
Chuanxiao Donga99d1792010-07-27 11:32:21 +0800259 return read_interrupt_status(denali) & DENALI_IRQ_ALL;
Jason Robertsce082592010-05-13 15:57:33 +0100260}
261
262/* Interrupts are cleared by writing a 1 to the appropriate status bit */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800263static inline void clear_interrupt(struct denali_nand_info *denali,
264 uint32_t irq_mask)
Jason Robertsce082592010-05-13 15:57:33 +0100265{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900266 uint32_t intr_status_reg;
Jason Robertsce082592010-05-13 15:57:33 +0100267
Jamie Iles9589bf52011-05-06 15:28:56 +0100268 intr_status_reg = INTR_STATUS(denali->flash_bank);
Jason Robertsce082592010-05-13 15:57:33 +0100269
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800270 iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
Jason Robertsce082592010-05-13 15:57:33 +0100271}
272
273static void clear_interrupts(struct denali_nand_info *denali)
274{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900275 uint32_t status;
276
Jason Robertsce082592010-05-13 15:57:33 +0100277 spin_lock_irq(&denali->irq_lock);
278
279 status = read_interrupt_status(denali);
Chuanxiao Dong8ae61eb2010-08-10 00:07:01 +0800280 clear_interrupt(denali, status);
Jason Robertsce082592010-05-13 15:57:33 +0100281
Jason Robertsce082592010-05-13 15:57:33 +0100282 denali->irq_status = 0x0;
283 spin_unlock_irq(&denali->irq_lock);
284}
285
286static uint32_t read_interrupt_status(struct denali_nand_info *denali)
287{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900288 uint32_t intr_status_reg;
Jason Robertsce082592010-05-13 15:57:33 +0100289
Jamie Iles9589bf52011-05-06 15:28:56 +0100290 intr_status_reg = INTR_STATUS(denali->flash_bank);
Jason Robertsce082592010-05-13 15:57:33 +0100291
292 return ioread32(denali->flash_reg + intr_status_reg);
293}
294
Masahiro Yamada43914a22014-09-09 11:01:51 +0900295/*
296 * This is the interrupt service routine. It handles all interrupts
297 * sent to this device. Note that on CE4100, this is a shared interrupt.
Jason Robertsce082592010-05-13 15:57:33 +0100298 */
299static irqreturn_t denali_isr(int irq, void *dev_id)
300{
301 struct denali_nand_info *denali = dev_id;
Masahiro Yamada5637b692014-09-09 11:01:52 +0900302 uint32_t irq_status;
Jason Robertsce082592010-05-13 15:57:33 +0100303 irqreturn_t result = IRQ_NONE;
304
305 spin_lock(&denali->irq_lock);
306
Masahiro Yamada43914a22014-09-09 11:01:51 +0900307 /* check to see if a valid NAND chip has been selected. */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800308 if (is_flash_bank_valid(denali->flash_bank)) {
Masahiro Yamada43914a22014-09-09 11:01:51 +0900309 /*
310 * check to see if controller generated the interrupt,
311 * since this is a shared interrupt
312 */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800313 irq_status = denali_irq_detected(denali);
314 if (irq_status != 0) {
Jason Robertsce082592010-05-13 15:57:33 +0100315 /* handle interrupt */
316 /* first acknowledge it */
317 clear_interrupt(denali, irq_status);
Masahiro Yamada43914a22014-09-09 11:01:51 +0900318 /*
319 * store the status in the device context for someone
320 * to read
321 */
Jason Robertsce082592010-05-13 15:57:33 +0100322 denali->irq_status |= irq_status;
323 /* notify anyone who cares that it happened */
324 complete(&denali->complete);
325 /* tell the OS that we've handled this */
326 result = IRQ_HANDLED;
327 }
328 }
329 spin_unlock(&denali->irq_lock);
330 return result;
331}
Jason Robertsce082592010-05-13 15:57:33 +0100332
333static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
334{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900335 unsigned long comp_res;
336 uint32_t intr_status;
Jason Robertsce082592010-05-13 15:57:33 +0100337 unsigned long timeout = msecs_to_jiffies(1000);
338
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800339 do {
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800340 comp_res =
341 wait_for_completion_timeout(&denali->complete, timeout);
Jason Robertsce082592010-05-13 15:57:33 +0100342 spin_lock_irq(&denali->irq_lock);
343 intr_status = denali->irq_status;
344
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800345 if (intr_status & irq_mask) {
Jason Robertsce082592010-05-13 15:57:33 +0100346 denali->irq_status &= ~irq_mask;
347 spin_unlock_irq(&denali->irq_lock);
Jason Robertsce082592010-05-13 15:57:33 +0100348 /* our interrupt was detected */
349 break;
Jason Robertsce082592010-05-13 15:57:33 +0100350 }
Masahiro Yamada81254502014-09-16 20:04:25 +0900351
352 /*
353 * these are not the interrupts you are looking for -
354 * need to wait again
355 */
356 spin_unlock_irq(&denali->irq_lock);
Jason Robertsce082592010-05-13 15:57:33 +0100357 } while (comp_res != 0);
358
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800359 if (comp_res == 0) {
Jason Robertsce082592010-05-13 15:57:33 +0100360 /* timeout */
Dinh Nguyen2a0a2882012-09-27 10:58:05 -0600361 pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800362 intr_status, irq_mask);
Jason Robertsce082592010-05-13 15:57:33 +0100363
364 intr_status = 0;
365 }
366 return intr_status;
367}
368
Masahiro Yamada43914a22014-09-09 11:01:51 +0900369/*
370 * This helper function setups the registers for ECC and whether or not
371 * the spare area will be transferred.
372 */
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800373static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
Jason Robertsce082592010-05-13 15:57:33 +0100374 bool transfer_spare)
375{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900376 int ecc_en_flag, transfer_spare_flag;
Jason Robertsce082592010-05-13 15:57:33 +0100377
378 /* set ECC, transfer spare bits if needed */
379 ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
380 transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
381
382 /* Enable spare area/ECC per user's request. */
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800383 iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
Masahiro Yamada81254502014-09-16 20:04:25 +0900384 iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
Jason Robertsce082592010-05-13 15:57:33 +0100385}
386
Masahiro Yamada43914a22014-09-09 11:01:51 +0900387/*
388 * sends a pipeline command operation to the controller. See the Denali NAND
Chuanxiao Dongb292c342010-08-11 17:46:00 +0800389 * controller's user guide for more information (section 4.2.3.6).
Jason Robertsce082592010-05-13 15:57:33 +0100390 */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800391static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
Masahiro Yamada81254502014-09-16 20:04:25 +0900392 bool ecc_en, bool transfer_spare,
393 int access_type, int op)
Jason Robertsce082592010-05-13 15:57:33 +0100394{
395 int status = PASS;
Masahiro Yamada8927ad32017-03-30 15:45:49 +0900396 uint32_t addr, cmd;
Jason Robertsce082592010-05-13 15:57:33 +0100397
398 setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
399
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800400 clear_interrupts(denali);
Jason Robertsce082592010-05-13 15:57:33 +0100401
402 addr = BANK(denali->flash_bank) | denali->page;
403
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800404 if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800405 cmd = MODE_01 | addr;
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800406 iowrite32(cmd, denali->flash_mem);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800407 } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
Jason Robertsce082592010-05-13 15:57:33 +0100408 /* read spare area */
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800409 cmd = MODE_10 | addr;
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900410 index_addr(denali, cmd, access_type);
Jason Robertsce082592010-05-13 15:57:33 +0100411
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800412 cmd = MODE_01 | addr;
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800413 iowrite32(cmd, denali->flash_mem);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800414 } else if (op == DENALI_READ) {
Jason Robertsce082592010-05-13 15:57:33 +0100415 /* setup page read request for access type */
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800416 cmd = MODE_10 | addr;
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900417 index_addr(denali, cmd, access_type);
Jason Robertsce082592010-05-13 15:57:33 +0100418
Masahiro Yamada8927ad32017-03-30 15:45:49 +0900419 cmd = MODE_01 | addr;
420 iowrite32(cmd, denali->flash_mem);
Jason Robertsce082592010-05-13 15:57:33 +0100421 }
422 return status;
423}
424
425/* helper function that simply writes a buffer to the flash */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800426static int write_data_to_flash_mem(struct denali_nand_info *denali,
Masahiro Yamada81254502014-09-16 20:04:25 +0900427 const uint8_t *buf, int len)
Jason Robertsce082592010-05-13 15:57:33 +0100428{
Masahiro Yamada93e3c8a2014-09-09 11:01:54 +0900429 uint32_t *buf32;
430 int i;
Jason Robertsce082592010-05-13 15:57:33 +0100431
Masahiro Yamada43914a22014-09-09 11:01:51 +0900432 /*
433 * verify that the len is a multiple of 4.
434 * see comment in read_data_from_flash_mem()
435 */
Jason Robertsce082592010-05-13 15:57:33 +0100436 BUG_ON((len % 4) != 0);
437
438 /* write the data to the flash memory */
439 buf32 = (uint32_t *)buf;
440 for (i = 0; i < len / 4; i++)
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800441 iowrite32(*buf32++, denali->flash_mem + 0x10);
Masahiro Yamada81254502014-09-16 20:04:25 +0900442 return i * 4; /* intent is to return the number of bytes read */
Jason Robertsce082592010-05-13 15:57:33 +0100443}
444
445/* helper function that simply reads a buffer from the flash */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800446static int read_data_from_flash_mem(struct denali_nand_info *denali,
Masahiro Yamada81254502014-09-16 20:04:25 +0900447 uint8_t *buf, int len)
Jason Robertsce082592010-05-13 15:57:33 +0100448{
Masahiro Yamada93e3c8a2014-09-09 11:01:54 +0900449 uint32_t *buf32;
450 int i;
Jason Robertsce082592010-05-13 15:57:33 +0100451
Masahiro Yamada43914a22014-09-09 11:01:51 +0900452 /*
453 * we assume that len will be a multiple of 4, if not it would be nice
454 * to know about it ASAP rather than have random failures...
455 * This assumption is based on the fact that this function is designed
456 * to be used to read flash pages, which are typically multiples of 4.
Jason Robertsce082592010-05-13 15:57:33 +0100457 */
Jason Robertsce082592010-05-13 15:57:33 +0100458 BUG_ON((len % 4) != 0);
459
460 /* transfer the data from the flash */
461 buf32 = (uint32_t *)buf;
462 for (i = 0; i < len / 4; i++)
Jason Robertsce082592010-05-13 15:57:33 +0100463 *buf32++ = ioread32(denali->flash_mem + 0x10);
Masahiro Yamada81254502014-09-16 20:04:25 +0900464 return i * 4; /* intent is to return the number of bytes read */
Jason Robertsce082592010-05-13 15:57:33 +0100465}
466
467/* writes OOB data to the device */
468static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
469{
470 struct denali_nand_info *denali = mtd_to_denali(mtd);
Masahiro Yamada5637b692014-09-09 11:01:52 +0900471 uint32_t irq_status;
Masahiro Yamada1aded582017-03-23 05:07:06 +0900472 uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL;
Jason Robertsce082592010-05-13 15:57:33 +0100473 int status = 0;
474
475 denali->page = page;
476
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800477 if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800478 DENALI_WRITE) == PASS) {
Jason Robertsce082592010-05-13 15:57:33 +0100479 write_data_to_flash_mem(denali, buf, mtd->oobsize);
480
Jason Robertsce082592010-05-13 15:57:33 +0100481 /* wait for operation to complete */
482 irq_status = wait_for_irq(denali, irq_mask);
483
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800484 if (irq_status == 0) {
Jamie Iles84457942011-05-06 15:28:55 +0100485 dev_err(denali->dev, "OOB write failed\n");
Jason Robertsce082592010-05-13 15:57:33 +0100486 status = -EIO;
487 }
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800488 } else {
Jamie Iles84457942011-05-06 15:28:55 +0100489 dev_err(denali->dev, "unable to send pipeline command\n");
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800490 status = -EIO;
Jason Robertsce082592010-05-13 15:57:33 +0100491 }
492 return status;
493}
494
495/* reads OOB data from the device */
496static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
497{
498 struct denali_nand_info *denali = mtd_to_denali(mtd);
Masahiro Yamada1aded582017-03-23 05:07:06 +0900499 uint32_t irq_mask = INTR__LOAD_COMP;
Masahiro Yamada5637b692014-09-09 11:01:52 +0900500 uint32_t irq_status, addr, cmd;
Jason Robertsce082592010-05-13 15:57:33 +0100501
502 denali->page = page;
503
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800504 if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800505 DENALI_READ) == PASS) {
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800506 read_data_from_flash_mem(denali, buf, mtd->oobsize);
Jason Robertsce082592010-05-13 15:57:33 +0100507
Masahiro Yamada43914a22014-09-09 11:01:51 +0900508 /*
509 * wait for command to be accepted
510 * can always use status0 bit as the
511 * mask is identical for each bank.
512 */
Jason Robertsce082592010-05-13 15:57:33 +0100513 irq_status = wait_for_irq(denali, irq_mask);
514
515 if (irq_status == 0)
Jamie Iles84457942011-05-06 15:28:55 +0100516 dev_err(denali->dev, "page on OOB timeout %d\n",
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800517 denali->page);
Jason Robertsce082592010-05-13 15:57:33 +0100518
Masahiro Yamada43914a22014-09-09 11:01:51 +0900519 /*
520 * We set the device back to MAIN_ACCESS here as I observed
Jason Robertsce082592010-05-13 15:57:33 +0100521 * instability with the controller if you do a block erase
522 * and the last transaction was a SPARE_ACCESS. Block erase
523 * is reliable (according to the MTD test infrastructure)
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800524 * if you are in MAIN_ACCESS.
Jason Robertsce082592010-05-13 15:57:33 +0100525 */
526 addr = BANK(denali->flash_bank) | denali->page;
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800527 cmd = MODE_10 | addr;
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900528 index_addr(denali, cmd, MAIN_ACCESS);
Jason Robertsce082592010-05-13 15:57:33 +0100529 }
530}
531
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900532static int denali_check_erased_page(struct mtd_info *mtd,
533 struct nand_chip *chip, uint8_t *buf,
534 unsigned long uncor_ecc_flags,
535 unsigned int max_bitflips)
Jason Robertsce082592010-05-13 15:57:33 +0100536{
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900537 uint8_t *ecc_code = chip->buffers->ecccode;
538 int ecc_steps = chip->ecc.steps;
539 int ecc_size = chip->ecc.size;
540 int ecc_bytes = chip->ecc.bytes;
541 int i, ret, stat;
Masahiro Yamada81254502014-09-16 20:04:25 +0900542
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900543 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
544 chip->ecc.total);
545 if (ret)
546 return ret;
547
548 for (i = 0; i < ecc_steps; i++) {
549 if (!(uncor_ecc_flags & BIT(i)))
550 continue;
551
552 stat = nand_check_erased_ecc_chunk(buf, ecc_size,
553 ecc_code, ecc_bytes,
554 NULL, 0,
555 chip->ecc.strength);
556 if (stat < 0) {
557 mtd->ecc_stats.failed++;
558 } else {
559 mtd->ecc_stats.corrected += stat;
560 max_bitflips = max_t(unsigned int, max_bitflips, stat);
561 }
562
563 buf += ecc_size;
564 ecc_code += ecc_bytes;
565 }
566
567 return max_bitflips;
Jason Robertsce082592010-05-13 15:57:33 +0100568}
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900569
Masahiro Yamada24715c72017-03-30 15:45:52 +0900570static int denali_hw_ecc_fixup(struct mtd_info *mtd,
571 struct denali_nand_info *denali,
572 unsigned long *uncor_ecc_flags)
573{
574 struct nand_chip *chip = mtd_to_nand(mtd);
575 int bank = denali->flash_bank;
576 uint32_t ecc_cor;
577 unsigned int max_bitflips;
578
579 ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
580 ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
581
582 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
583 /*
584 * This flag is set when uncorrectable error occurs at least in
585 * one ECC sector. We can not know "how many sectors", or
586 * "which sector(s)". We need erase-page check for all sectors.
587 */
588 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
589 return 0;
590 }
591
592 max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
593
594 /*
595 * The register holds the maximum of per-sector corrected bitflips.
596 * This is suitable for the return value of the ->read_page() callback.
597 * Unfortunately, we can not know the total number of corrected bits in
598 * the page. Increase the stats by max_bitflips. (compromised solution)
599 */
600 mtd->ecc_stats.corrected += max_bitflips;
601
602 return max_bitflips;
603}
604
/* field extractors for the ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO words */
#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
611
Masahiro Yamada24715c72017-03-30 15:45:52 +0900612static int denali_sw_ecc_fixup(struct mtd_info *mtd,
613 struct denali_nand_info *denali,
614 unsigned long *uncor_ecc_flags, uint8_t *buf)
Jason Robertsce082592010-05-13 15:57:33 +0100615{
Masahiro Yamada7de117f2017-06-07 20:52:12 +0900616 unsigned int ecc_size = denali->nand.ecc.size;
Mike Dunn3f91e942012-04-25 12:06:09 -0700617 unsigned int bitflips = 0;
Masahiro Yamada20d48592017-03-30 15:45:50 +0900618 unsigned int max_bitflips = 0;
619 uint32_t err_addr, err_cor_info;
620 unsigned int err_byte, err_sector, err_device;
621 uint8_t err_cor_value;
622 unsigned int prev_sector = 0;
Jason Robertsce082592010-05-13 15:57:33 +0100623
Masahiro Yamada20d48592017-03-30 15:45:50 +0900624 /* read the ECC errors. we'll ignore them for now */
625 denali_set_intr_modes(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +0100626
Masahiro Yamada20d48592017-03-30 15:45:50 +0900627 do {
628 err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
629 err_sector = ECC_SECTOR(err_addr);
630 err_byte = ECC_BYTE(err_addr);
Jason Robertsce082592010-05-13 15:57:33 +0100631
Masahiro Yamada20d48592017-03-30 15:45:50 +0900632 err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
633 err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
634 err_device = ECC_ERR_DEVICE(err_cor_info);
Jason Robertsce082592010-05-13 15:57:33 +0100635
Masahiro Yamada20d48592017-03-30 15:45:50 +0900636 /* reset the bitflip counter when crossing ECC sector */
637 if (err_sector != prev_sector)
638 bitflips = 0;
Masahiro Yamada81254502014-09-16 20:04:25 +0900639
Masahiro Yamada20d48592017-03-30 15:45:50 +0900640 if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
641 /*
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900642 * Check later if this is a real ECC error, or
643 * an erased sector.
Masahiro Yamada20d48592017-03-30 15:45:50 +0900644 */
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900645 *uncor_ecc_flags |= BIT(err_sector);
Masahiro Yamada7de117f2017-06-07 20:52:12 +0900646 } else if (err_byte < ecc_size) {
Masahiro Yamada20d48592017-03-30 15:45:50 +0900647 /*
Masahiro Yamada7de117f2017-06-07 20:52:12 +0900648 * If err_byte is larger than ecc_size, means error
Masahiro Yamada20d48592017-03-30 15:45:50 +0900649 * happened in OOB, so we ignore it. It's no need for
650 * us to correct it err_device is represented the NAND
651 * error bits are happened in if there are more than
652 * one NAND connected.
653 */
654 int offset;
655 unsigned int flips_in_byte;
656
Masahiro Yamada7de117f2017-06-07 20:52:12 +0900657 offset = (err_sector * ecc_size + err_byte) *
Masahiro Yamada20d48592017-03-30 15:45:50 +0900658 denali->devnum + err_device;
659
660 /* correct the ECC error */
661 flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
662 buf[offset] ^= err_cor_value;
663 mtd->ecc_stats.corrected += flips_in_byte;
664 bitflips += flips_in_byte;
665
666 max_bitflips = max(max_bitflips, bitflips);
667 }
668
669 prev_sector = err_sector;
670 } while (!ECC_LAST_ERR(err_cor_info));
671
672 /*
673 * Once handle all ecc errors, controller will trigger a
674 * ECC_TRANSACTION_DONE interrupt, so here just wait for
675 * a while for this interrupt
676 */
677 while (!(read_interrupt_status(denali) & INTR__ECC_TRANSACTION_DONE))
678 cpu_relax();
679 clear_interrupts(denali);
680 denali_set_intr_modes(denali, true);
681
682 return max_bitflips;
Jason Robertsce082592010-05-13 15:57:33 +0100683}
684
/* programs the controller to either enable/disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
	iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
	/* read back to make sure the write has reached the controller */
	ioread32(denali->flash_reg + DMA_ENABLE);
}
691
Masahiro Yamada210a2c82017-03-30 15:45:54 +0900692static void denali_setup_dma64(struct denali_nand_info *denali, int op)
693{
694 uint32_t mode;
695 const int page_count = 1;
696 uint64_t addr = denali->buf.dma_buf;
697
698 mode = MODE_10 | BANK(denali->flash_bank) | denali->page;
699
700 /* DMA is a three step process */
701
702 /*
703 * 1. setup transfer type, interrupt when complete,
704 * burst len = 64 bytes, the number of pages
705 */
706 index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);
707
708 /* 2. set memory low address */
709 index_addr(denali, mode, addr);
710
711 /* 3. set memory high address */
712 index_addr(denali, mode, addr >> 32);
713}
714
/*
 * Program a single-page DMA transfer (@op is DENALI_READ or DENALI_WRITE)
 * using the legacy 32-bit addressing variant of the controller's MODE_10
 * indexed-address protocol.
 */
static void denali_setup_dma32(struct denali_nand_info *denali, int op)
{
	uint32_t mode;
	const int page_count = 1;
	uint32_t addr = denali->buf.dma_buf;

	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	index_addr(denali, mode | 0x14000, 0x2400);
}
737
Masahiro Yamada210a2c82017-03-30 15:45:54 +0900738static void denali_setup_dma(struct denali_nand_info *denali, int op)
739{
740 if (denali->caps & DENALI_CAP_DMA_64BIT)
741 denali_setup_dma64(denali, op);
742 else
743 denali_setup_dma32(denali, op);
744}
745
Masahiro Yamada43914a22014-09-09 11:01:51 +0900746/*
747 * writes a page. user specifies type, and this function handles the
748 * configuration details.
749 */
Josh Wufdbad98d2012-06-25 18:07:45 +0800750static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
Masahiro Yamadab21ff822017-06-13 22:45:35 +0900751 const uint8_t *buf, int page, bool raw_xfer)
Jason Robertsce082592010-05-13 15:57:33 +0100752{
753 struct denali_nand_info *denali = mtd_to_denali(mtd);
Jason Robertsce082592010-05-13 15:57:33 +0100754 dma_addr_t addr = denali->buf.dma_buf;
Boris BREZILLON442f201b2015-12-11 15:06:00 +0100755 size_t size = mtd->writesize + mtd->oobsize;
Masahiro Yamada5637b692014-09-09 11:01:52 +0900756 uint32_t irq_status;
Masahiro Yamada1aded582017-03-23 05:07:06 +0900757 uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
Masahiro Yamadab21ff822017-06-13 22:45:35 +0900758 int ret = 0;
759
760 denali->page = page;
Jason Robertsce082592010-05-13 15:57:33 +0100761
Masahiro Yamada43914a22014-09-09 11:01:51 +0900762 /*
763 * if it is a raw xfer, we want to disable ecc and send the spare area.
Jason Robertsce082592010-05-13 15:57:33 +0100764 * !raw_xfer - enable ecc
765 * raw_xfer - transfer spare
766 */
767 setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
768
769 /* copy buffer into DMA buffer */
770 memcpy(denali->buf.buf, buf, mtd->writesize);
771
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800772 if (raw_xfer) {
Jason Robertsce082592010-05-13 15:57:33 +0100773 /* transfer the data to the spare area */
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800774 memcpy(denali->buf.buf + mtd->writesize,
775 chip->oob_poi,
776 mtd->oobsize);
Jason Robertsce082592010-05-13 15:57:33 +0100777 }
778
Jamie Iles84457942011-05-06 15:28:55 +0100779 dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
Jason Robertsce082592010-05-13 15:57:33 +0100780
781 clear_interrupts(denali);
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800782 denali_enable_dma(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +0100783
David Woodhouseaadff492010-05-13 16:12:43 +0100784 denali_setup_dma(denali, DENALI_WRITE);
Jason Robertsce082592010-05-13 15:57:33 +0100785
786 /* wait for operation to complete */
787 irq_status = wait_for_irq(denali, irq_mask);
788
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800789 if (irq_status == 0) {
Masahiro Yamada81254502014-09-16 20:04:25 +0900790 dev_err(denali->dev, "timeout on write_page (type = %d)\n",
791 raw_xfer);
Masahiro Yamadab21ff822017-06-13 22:45:35 +0900792 ret = -EIO;
Jason Robertsce082592010-05-13 15:57:33 +0100793 }
794
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800795 denali_enable_dma(denali, false);
Jamie Iles84457942011-05-06 15:28:55 +0100796 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
Josh Wufdbad98d2012-06-25 18:07:45 +0800797
Masahiro Yamadab21ff822017-06-13 22:45:35 +0900798 return ret;
Jason Robertsce082592010-05-13 15:57:33 +0100799}
800
801/* NAND core entry points */
802
Masahiro Yamada43914a22014-09-09 11:01:51 +0900803/*
804 * this is the callback that the NAND core calls to write a page. Since
Chuanxiao Dongb292c342010-08-11 17:46:00 +0800805 * writing a page with ECC or without is similar, all the work is done
806 * by write_page above.
Masahiro Yamada43914a22014-09-09 11:01:51 +0900807 */
Josh Wufdbad98d2012-06-25 18:07:45 +0800808static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
Boris BREZILLON45aaeff2015-10-13 11:22:18 +0200809 const uint8_t *buf, int oob_required, int page)
Jason Robertsce082592010-05-13 15:57:33 +0100810{
Masahiro Yamada43914a22014-09-09 11:01:51 +0900811 /*
812 * for regular page writes, we let HW handle all the ECC
813 * data written to the device.
814 */
Masahiro Yamadab21ff822017-06-13 22:45:35 +0900815 return write_page(mtd, chip, buf, page, false);
Jason Robertsce082592010-05-13 15:57:33 +0100816}
817
Masahiro Yamada43914a22014-09-09 11:01:51 +0900818/*
819 * This is the callback that the NAND core calls to write a page without ECC.
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300820 * raw access is similar to ECC page writes, so all the work is done in the
Chuanxiao Dongb292c342010-08-11 17:46:00 +0800821 * write_page() function above.
Jason Robertsce082592010-05-13 15:57:33 +0100822 */
Josh Wufdbad98d2012-06-25 18:07:45 +0800823static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
Boris BREZILLON45aaeff2015-10-13 11:22:18 +0200824 const uint8_t *buf, int oob_required,
825 int page)
Jason Robertsce082592010-05-13 15:57:33 +0100826{
Masahiro Yamada43914a22014-09-09 11:01:51 +0900827 /*
828 * for raw page writes, we want to disable ECC and simply write
829 * whatever data is in the buffer.
830 */
Masahiro Yamadab21ff822017-06-13 22:45:35 +0900831 return write_page(mtd, chip, buf, page, true);
Jason Robertsce082592010-05-13 15:57:33 +0100832}
833
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800834static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +0100835 int page)
836{
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800837 return write_oob_data(mtd, chip->oob_poi, page);
Jason Robertsce082592010-05-13 15:57:33 +0100838}
839
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800840static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
Shmulik Ladkani5c2ffb12012-05-09 13:06:35 +0300841 int page)
Jason Robertsce082592010-05-13 15:57:33 +0100842{
843 read_oob_data(mtd, chip->oob_poi, page);
844
Shmulik Ladkani5c2ffb12012-05-09 13:06:35 +0300845 return 0;
Jason Robertsce082592010-05-13 15:57:33 +0100846}
847
/*
 * NAND core ecc.read_page hook: DMA one page into the bounce buffer with
 * hardware ECC enabled, then fix up errors either via the hardware fixup
 * engine (DENALI_CAP_HW_ECC_FIXUP) or in software.  Sectors flagged as
 * uncorrectable are re-checked against the erased-page pattern.
 *
 * Returns max_bitflips on success or a negative error code.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_status;
	/* the completion interrupts differ between HW and SW ECC fixup */
	uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ?
				INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR :
				INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR;
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;

	denali->page = page;

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	memcpy(buf, denali->buf.buf, mtd->writesize);

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (irq_status & INTR__ECC_ERR)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
	denali_enable_dma(denali, false);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* OOB is needed to tell an erased page from a corrupt one */
		read_oob_data(mtd, chip->oob_poi, denali->page);

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
896
/*
 * NAND core ecc.read_page_raw hook: DMA one page plus spare area with ECC
 * disabled, copying both main data and OOB back to the caller.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_mask = INTR__DMA_CMD_COMP;

	denali->page = page;

	/* raw read: ECC off, spare area transferred */
	setup_ecc_for_xfer(denali, false, true);
	denali_enable_dma(denali, true);

	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete (timeout is ignored here) */
	wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	denali_enable_dma(denali, false);

	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
927
928static uint8_t denali_read_byte(struct mtd_info *mtd)
929{
930 struct denali_nand_info *denali = mtd_to_denali(mtd);
931 uint8_t result = 0xff;
932
933 if (denali->buf.head < denali->buf.tail)
Jason Robertsce082592010-05-13 15:57:33 +0100934 result = denali->buf.buf[denali->buf.head++];
Jason Robertsce082592010-05-13 15:57:33 +0100935
Jason Robertsce082592010-05-13 15:57:33 +0100936 return result;
937}
938
/*
 * NAND core select_chip hook: record the active bank.  The store is done
 * under irq_lock because the ISR reads flash_bank to filter interrupts.
 */
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	spin_lock_irq(&denali->irq_lock);
	denali->flash_bank = chip;
	spin_unlock_irq(&denali->irq_lock);
}
947
/*
 * NAND core waitfunc hook.  Always reports success: completion and
 * failure are handled through the controller interrupts in the read,
 * write and erase paths instead.
 */
static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	return 0;
}
952
Brian Norris49c50b92014-05-06 16:02:19 -0700953static int denali_erase(struct mtd_info *mtd, int page)
Jason Robertsce082592010-05-13 15:57:33 +0100954{
955 struct denali_nand_info *denali = mtd_to_denali(mtd);
956
Masahiro Yamada5637b692014-09-09 11:01:52 +0900957 uint32_t cmd, irq_status;
Jason Robertsce082592010-05-13 15:57:33 +0100958
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800959 clear_interrupts(denali);
Jason Robertsce082592010-05-13 15:57:33 +0100960
961 /* setup page read request for access type */
962 cmd = MODE_10 | BANK(denali->flash_bank) | page;
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900963 index_addr(denali, cmd, 0x1);
Jason Robertsce082592010-05-13 15:57:33 +0100964
965 /* wait for erase to complete or failure to occur */
Masahiro Yamada1aded582017-03-23 05:07:06 +0900966 irq_status = wait_for_irq(denali, INTR__ERASE_COMP | INTR__ERASE_FAIL);
Jason Robertsce082592010-05-13 15:57:33 +0100967
Masahiro Yamada1aded582017-03-23 05:07:06 +0900968 return irq_status & INTR__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
Jason Robertsce082592010-05-13 15:57:33 +0100969}
970
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800971static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
Jason Robertsce082592010-05-13 15:57:33 +0100972 int page)
973{
974 struct denali_nand_info *denali = mtd_to_denali(mtd);
Chuanxiao Dongef41e1b2010-08-06 00:48:49 +0800975 uint32_t addr, id;
976 int i;
Jason Robertsce082592010-05-13 15:57:33 +0100977
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800978 switch (cmd) {
Chuanxiao Donga99d1792010-07-27 11:32:21 +0800979 case NAND_CMD_STATUS:
980 read_status(denali);
981 break;
982 case NAND_CMD_READID:
Florian Fainelli42af8b52010-08-30 18:32:20 +0200983 case NAND_CMD_PARAM:
Chuanxiao Donga99d1792010-07-27 11:32:21 +0800984 reset_buf(denali);
Masahiro Yamada43914a22014-09-09 11:01:51 +0900985 /*
986 * sometimes ManufactureId read from register is not right
Chuanxiao Dongef41e1b2010-08-06 00:48:49 +0800987 * e.g. some of Micron MT29F32G08QAA MLC NAND chips
988 * So here we send READID cmd to NAND insteand
Masahiro Yamada43914a22014-09-09 11:01:51 +0900989 */
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900990 addr = MODE_11 | BANK(denali->flash_bank);
991 index_addr(denali, addr | 0, 0x90);
Enrico Jorns9c07d092015-09-18 10:02:41 +0200992 index_addr(denali, addr | 1, col);
grmoore@altera.comd68a5c32014-06-23 14:21:10 -0500993 for (i = 0; i < 8; i++) {
Masahiro Yamada81254502014-09-16 20:04:25 +0900994 index_addr_read_data(denali, addr | 2, &id);
Chuanxiao Dongef41e1b2010-08-06 00:48:49 +0800995 write_byte_to_buf(denali, id);
Chuanxiao Donga99d1792010-07-27 11:32:21 +0800996 }
997 break;
Chuanxiao Donga99d1792010-07-27 11:32:21 +0800998 case NAND_CMD_RESET:
999 reset_bank(denali);
1000 break;
1001 case NAND_CMD_READOOB:
1002 /* TODO: Read OOB data */
1003 break;
1004 default:
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001005 pr_err(": unsupported command received 0x%x\n", cmd);
Chuanxiao Donga99d1792010-07-27 11:32:21 +08001006 break;
Jason Robertsce082592010-05-13 15:57:33 +01001007 }
1008}
Masahiro Yamada1bb88662017-06-13 22:45:37 +09001009
/*
 * Round-down 64-bit division usable on 32-bit kernels (do_div divides
 * in place).  Both arguments are parenthesized so expression arguments
 * expand safely.
 */
#define DIV_ROUND_DOWN_ULL(ll, d) \
	({ unsigned long long _tmp = (ll); do_div(_tmp, (d)); _tmp; })
1012
/*
 * NAND core setup_data_interface hook: translate the requested SDR
 * timings into controller clock counts (clk_x periods) and program the
 * timing registers.  Each register is updated read-modify-write, with
 * the computed count clamped to the field width.
 *
 * Returns 0 on success, -EINVAL if the clock rate is unusable, or a
 * PTR_ERR from nand_get_sdr_timings().
 */
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_clk;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_clk)
		return -EINVAL;

	/* only validate, don't touch the hardware */
	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->flash_reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= acc_clks;
	iowrite32(tmp, denali->flash_reg + ACC_CLKS);

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->flash_reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= re_2_we;
	iowrite32(tmp, denali->flash_reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->flash_reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= re_2_re;
	iowrite32(tmp, denali->flash_reg + RE_2_RE);

	/* tWHR -> WE_2_RE */
	we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->flash_reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= we_2_re;
	iowrite32(tmp, denali->flash_reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->flash_reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~addr_2_data_mask;
	tmp |= addr_2_data;
	iowrite32(tmp, denali->flash_reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_clk);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= rdwr_en_hi;
	iowrite32(tmp, denali->flash_reg + RDWR_EN_HI_CNT);

	/* tRP, tWP -> RDWR_EN_LO_CNT */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
				  t_clk);
	/* the low phase must also satisfy the full-cycle constraint tRC/tWC */
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_clk);
	rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= rdwr_en_lo;
	iowrite32(tmp, denali->flash_reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= cs_setup;
	iowrite32(tmp, denali->flash_reg + CS_SETUP_CNT);

	return 0;
}
Jason Robertsce082592010-05-13 15:57:33 +01001124
1125/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision =
				swab16(ioread32(denali->flash_reg + REVISION));

	/*
	 * tell driver how many bit controller will skip before
	 * writing ECC code in OOB, this register may be already
	 * set by firmware. So we read this value out.
	 * if this value is 0, just let it be.
	 */
	denali->bbtskipbytes = ioread32(denali->flash_reg +
						SPARE_AREA_SKIP_BYTES);
	detect_max_banks(denali);
	denali_nand_reset(denali);
	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG,
			denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(1, denali->flash_reg + ECC_ENABLE);
	denali_irq_init(denali);
}
1157
Masahiro Yamada7de117f2017-06-07 20:52:12 +09001158int denali_calc_ecc_bytes(int step_size, int strength)
1159{
1160 /* BCH code. Denali requires ecc.bytes to be multiple of 2 */
1161 return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
1162}
1163EXPORT_SYMBOL(denali_calc_ecc_bytes);
1164
1165static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
1166 struct denali_nand_info *denali)
1167{
1168 int oobavail = mtd->oobsize - denali->bbtskipbytes;
1169 int ret;
1170
1171 /*
1172 * If .size and .strength are already set (usually by DT),
1173 * check if they are supported by this controller.
1174 */
1175 if (chip->ecc.size && chip->ecc.strength)
1176 return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
1177
1178 /*
1179 * We want .size and .strength closest to the chip's requirement
1180 * unless NAND_ECC_MAXIMIZE is requested.
1181 */
1182 if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
1183 ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
1184 if (!ret)
1185 return 0;
1186 }
1187
1188 /* Max ECC strength is the last thing we can do */
1189 return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
1190}
Boris Brezillon14fad622016-02-03 20:00:11 +01001191
1192static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
1193 struct mtd_oob_region *oobregion)
1194{
1195 struct denali_nand_info *denali = mtd_to_denali(mtd);
1196 struct nand_chip *chip = mtd_to_nand(mtd);
1197
1198 if (section)
1199 return -ERANGE;
1200
1201 oobregion->offset = denali->bbtskipbytes;
1202 oobregion->length = chip->ecc.total;
1203
1204 return 0;
1205}
1206
1207static int denali_ooblayout_free(struct mtd_info *mtd, int section,
1208 struct mtd_oob_region *oobregion)
1209{
1210 struct denali_nand_info *denali = mtd_to_denali(mtd);
1211 struct nand_chip *chip = mtd_to_nand(mtd);
1212
1213 if (section)
1214 return -ERANGE;
1215
1216 oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
1217 oobregion->length = mtd->oobsize - oobregion->offset;
1218
1219 return 0;
1220}
1221
/* OOB layout callbacks registered with the MTD core */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
1226
/* signature patterns identifying the main and mirror bad block tables */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/* on-flash BBT descriptor: last blocks of each chip, versioned, 2-bit entries */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

/* mirror copy of the BBT, same layout with its own signature */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
1249
Uwe Kleine-König421f91d2010-06-11 12:17:00 +02001250/* initialize driver data structures */
Brian Norris8c519432013-08-10 22:57:30 -07001251static void denali_drv_init(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +01001252{
Masahiro Yamada43914a22014-09-09 11:01:51 +09001253 /*
1254 * the completion object will be used to notify
1255 * the callee that the interrupt is done
1256 */
Jason Robertsce082592010-05-13 15:57:33 +01001257 init_completion(&denali->complete);
1258
Masahiro Yamada43914a22014-09-09 11:01:51 +09001259 /*
1260 * the spinlock will be used to synchronize the ISR with any
1261 * element that might be access shared data (interrupt status)
1262 */
Jason Robertsce082592010-05-13 15:57:33 +01001263 spin_lock_init(&denali->irq_lock);
1264
1265 /* indicate that MTD has not selected a valid bank yet */
1266 denali->flash_bank = CHIP_SELECT_INVALID;
1267
1268 /* initialize our irq_status variable to indicate no interrupts */
1269 denali->irq_status = 0;
1270}
1271
Masahiro Yamadae93c1642017-03-23 05:07:21 +09001272static int denali_multidev_fixup(struct denali_nand_info *denali)
Masahiro Yamada6da27b42017-03-23 05:07:20 +09001273{
1274 struct nand_chip *chip = &denali->nand;
1275 struct mtd_info *mtd = nand_to_mtd(chip);
1276
1277 /*
1278 * Support for multi device:
1279 * When the IP configuration is x16 capable and two x8 chips are
1280 * connected in parallel, DEVICES_CONNECTED should be set to 2.
1281 * In this case, the core framework knows nothing about this fact,
1282 * so we should tell it the _logical_ pagesize and anything necessary.
1283 */
1284 denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
1285
Masahiro Yamadacc5d8032017-03-23 05:07:22 +09001286 /*
1287 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
1288 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
1289 */
1290 if (denali->devnum == 0) {
1291 denali->devnum = 1;
1292 iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
1293 }
1294
Masahiro Yamadae93c1642017-03-23 05:07:21 +09001295 if (denali->devnum == 1)
1296 return 0;
1297
1298 if (denali->devnum != 2) {
1299 dev_err(denali->dev, "unsupported number of devices %d\n",
1300 denali->devnum);
1301 return -EINVAL;
1302 }
1303
1304 /* 2 chips in parallel */
1305 mtd->size <<= 1;
1306 mtd->erasesize <<= 1;
1307 mtd->writesize <<= 1;
1308 mtd->oobsize <<= 1;
1309 chip->chipsize <<= 1;
1310 chip->page_shift += 1;
1311 chip->phys_erase_shift += 1;
1312 chip->bbt_erase_shift += 1;
1313 chip->chip_shift += 1;
1314 chip->pagemask <<= 1;
1315 chip->ecc.size <<= 1;
1316 chip->ecc.bytes <<= 1;
1317 chip->ecc.strength <<= 1;
1318 denali->bbtskipbytes <<= 1;
1319
1320 return 0;
Masahiro Yamada6da27b42017-03-23 05:07:20 +09001321}
1322
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001323int denali_init(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +01001324{
Masahiro Yamada1394a722017-03-23 05:07:17 +09001325 struct nand_chip *chip = &denali->nand;
1326 struct mtd_info *mtd = nand_to_mtd(chip);
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001327 int ret;
Jason Robertsce082592010-05-13 15:57:33 +01001328
Huang Shijiee07caa32013-12-21 00:02:28 +08001329 /* allocate a temporary buffer for nand_scan_ident() */
1330 denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
1331 GFP_DMA | GFP_KERNEL);
1332 if (!denali->buf.buf)
1333 return -ENOMEM;
Jason Robertsce082592010-05-13 15:57:33 +01001334
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001335 mtd->dev.parent = denali->dev;
Jason Robertsce082592010-05-13 15:57:33 +01001336 denali_hw_init(denali);
1337 denali_drv_init(denali);
1338
Masahiro Yamada7ebb8d02016-11-09 13:35:27 +09001339 /* Request IRQ after all the hardware initialization is finished */
1340 ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
1341 IRQF_SHARED, DENALI_NAND_NAME, denali);
1342 if (ret) {
Masahiro Yamada789ccf12016-11-09 13:35:24 +09001343 dev_err(denali->dev, "Unable to request IRQ\n");
Masahiro Yamada7ebb8d02016-11-09 13:35:27 +09001344 return ret;
Jason Robertsce082592010-05-13 15:57:33 +01001345 }
1346
1347 /* now that our ISR is registered, we can enable interrupts */
Chuanxiao Dongeda936e2010-07-27 14:17:37 +08001348 denali_set_intr_modes(denali, true);
Masahiro Yamada63757d42017-03-23 05:07:18 +09001349 nand_set_flash_node(chip, denali->dev->of_node);
Masahiro Yamada8aabdf32017-03-30 15:45:48 +09001350 /* Fallback to the default name if DT did not give "label" property */
1351 if (!mtd->name)
1352 mtd->name = "denali-nand";
Jason Robertsce082592010-05-13 15:57:33 +01001353
1354 /* register the driver with the NAND core subsystem */
Masahiro Yamada1394a722017-03-23 05:07:17 +09001355 chip->select_chip = denali_select_chip;
1356 chip->cmdfunc = denali_cmdfunc;
1357 chip->read_byte = denali_read_byte;
1358 chip->waitfunc = denali_waitfunc;
Boris Brezillon4a78cc62017-05-26 17:10:15 +02001359 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
1360 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
Jason Robertsce082592010-05-13 15:57:33 +01001361
Masahiro Yamada1bb88662017-06-13 22:45:37 +09001362 /* clk rate info is needed for setup_data_interface */
1363 if (denali->clk_x_rate)
1364 chip->setup_data_interface = denali_setup_data_interface;
1365
Masahiro Yamada43914a22014-09-09 11:01:51 +09001366 /*
1367 * scan for NAND devices attached to the controller
Jason Robertsce082592010-05-13 15:57:33 +01001368 * this is the first stage in a two step process to register
Masahiro Yamada43914a22014-09-09 11:01:51 +09001369 * with the nand subsystem
1370 */
Masahiro Yamadaa227d4e2016-11-09 13:35:28 +09001371 ret = nand_scan_ident(mtd, denali->max_banks, NULL);
1372 if (ret)
Chuanxiao Dong5c0eb902010-08-09 18:37:00 +08001373 goto failed_req_irq;
Chuanxiao5bac3acf2010-08-05 23:06:04 +08001374
Huang Shijiee07caa32013-12-21 00:02:28 +08001375 /* allocate the right size buffer now */
1376 devm_kfree(denali->dev, denali->buf.buf);
1377 denali->buf.buf = devm_kzalloc(denali->dev,
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001378 mtd->writesize + mtd->oobsize,
Huang Shijiee07caa32013-12-21 00:02:28 +08001379 GFP_KERNEL);
1380 if (!denali->buf.buf) {
1381 ret = -ENOMEM;
1382 goto failed_req_irq;
1383 }
1384
Masahiro Yamada210a2c82017-03-30 15:45:54 +09001385 ret = dma_set_mask(denali->dev,
1386 DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ?
1387 64 : 32));
Huang Shijiee07caa32013-12-21 00:02:28 +08001388 if (ret) {
Masahiro Yamada789ccf12016-11-09 13:35:24 +09001389 dev_err(denali->dev, "No usable DMA configuration\n");
Huang Shijiee07caa32013-12-21 00:02:28 +08001390 goto failed_req_irq;
1391 }
1392
1393 denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001394 mtd->writesize + mtd->oobsize,
Huang Shijiee07caa32013-12-21 00:02:28 +08001395 DMA_BIDIRECTIONAL);
1396 if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
Masahiro Yamada789ccf12016-11-09 13:35:24 +09001397 dev_err(denali->dev, "Failed to map DMA buffer\n");
Huang Shijiee07caa32013-12-21 00:02:28 +08001398 ret = -EIO;
Chuanxiao Dong5c0eb902010-08-09 18:37:00 +08001399 goto failed_req_irq;
Chuanxiao.Dong664065242010-08-06 18:48:21 +08001400 }
1401
Masahiro Yamada43914a22014-09-09 11:01:51 +09001402 /*
Masahiro Yamada43914a22014-09-09 11:01:51 +09001403 * second stage of the NAND scan
Chuanxiao5bac3acf2010-08-05 23:06:04 +08001404 * this stage requires information regarding ECC and
Masahiro Yamada43914a22014-09-09 11:01:51 +09001405 * bad block management.
1406 */
Jason Robertsce082592010-05-13 15:57:33 +01001407
1408 /* Bad block management */
Masahiro Yamada1394a722017-03-23 05:07:17 +09001409 chip->bbt_td = &bbt_main_descr;
1410 chip->bbt_md = &bbt_mirror_descr;
Jason Robertsce082592010-05-13 15:57:33 +01001411
1412 /* skip the scan for now until we have OOB read and write support */
Masahiro Yamada1394a722017-03-23 05:07:17 +09001413 chip->bbt_options |= NAND_BBT_USE_FLASH;
1414 chip->options |= NAND_SKIP_BBTSCAN;
1415 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
Jason Robertsce082592010-05-13 15:57:33 +01001416
Graham Moored99d7282015-01-14 09:38:50 -06001417 /* no subpage writes on denali */
Masahiro Yamada1394a722017-03-23 05:07:17 +09001418 chip->options |= NAND_NO_SUBPAGE_WRITE;
Graham Moored99d7282015-01-14 09:38:50 -06001419
Masahiro Yamada7de117f2017-06-07 20:52:12 +09001420 ret = denali_ecc_setup(mtd, chip, denali);
1421 if (ret) {
1422 dev_err(denali->dev, "Failed to setup ECC settings.\n");
Chuanxiao Dong5c0eb902010-08-09 18:37:00 +08001423 goto failed_req_irq;
Jason Robertsce082592010-05-13 15:57:33 +01001424 }
1425
Masahiro Yamada7de117f2017-06-07 20:52:12 +09001426 dev_dbg(denali->dev,
1427 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1428 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1429
1430 iowrite32(chip->ecc.strength, denali->flash_reg + ECC_CORRECTION);
Masahiro Yamada0615e7a2017-06-07 20:52:13 +09001431 iowrite32(mtd->erasesize / mtd->writesize,
1432 denali->flash_reg + PAGES_PER_BLOCK);
1433 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
1434 denali->flash_reg + DEVICE_WIDTH);
1435 iowrite32(mtd->writesize, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
1436 iowrite32(mtd->oobsize, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
Masahiro Yamada7de117f2017-06-07 20:52:12 +09001437
1438 iowrite32(chip->ecc.size, denali->flash_reg + CFG_DATA_BLOCK_SIZE);
1439 iowrite32(chip->ecc.size, denali->flash_reg + CFG_LAST_DATA_BLOCK_SIZE);
1440 /* chip->ecc.steps is set by nand_scan_tail(); not available here */
1441 iowrite32(mtd->writesize / chip->ecc.size,
1442 denali->flash_reg + CFG_NUM_DATA_BLOCKS);
1443
Boris Brezillon14fad622016-02-03 20:00:11 +01001444 mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
Chuanxiao Dongdb9a32102010-08-06 18:02:03 +08001445
Masahiro Yamadab21ff822017-06-13 22:45:35 +09001446 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
Masahiro Yamada1394a722017-03-23 05:07:17 +09001447 chip->ecc.read_page = denali_read_page;
1448 chip->ecc.read_page_raw = denali_read_page_raw;
1449 chip->ecc.write_page = denali_write_page;
1450 chip->ecc.write_page_raw = denali_write_page_raw;
1451 chip->ecc.read_oob = denali_read_oob;
1452 chip->ecc.write_oob = denali_write_oob;
1453 chip->erase = denali_erase;
Jason Robertsce082592010-05-13 15:57:33 +01001454
Masahiro Yamadae93c1642017-03-23 05:07:21 +09001455 ret = denali_multidev_fixup(denali);
1456 if (ret)
1457 goto failed_req_irq;
Masahiro Yamada6da27b42017-03-23 05:07:20 +09001458
Masahiro Yamadaa227d4e2016-11-09 13:35:28 +09001459 ret = nand_scan_tail(mtd);
1460 if (ret)
Chuanxiao Dong5c0eb902010-08-09 18:37:00 +08001461 goto failed_req_irq;
Jason Robertsce082592010-05-13 15:57:33 +01001462
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001463 ret = mtd_device_register(mtd, NULL, 0);
Jason Robertsce082592010-05-13 15:57:33 +01001464 if (ret) {
Masahiro Yamada789ccf12016-11-09 13:35:24 +09001465 dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
Chuanxiao Dong5c0eb902010-08-09 18:37:00 +08001466 goto failed_req_irq;
Jason Robertsce082592010-05-13 15:57:33 +01001467 }
1468 return 0;
1469
Chuanxiao Dong5c0eb902010-08-09 18:37:00 +08001470failed_req_irq:
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001471 denali_irq_cleanup(denali->irq, denali);
1472
Jason Robertsce082592010-05-13 15:57:33 +01001473 return ret;
1474}
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001475EXPORT_SYMBOL(denali_init);
Jason Robertsce082592010-05-13 15:57:33 +01001476
1477/* driver exit point */
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001478void denali_remove(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +01001479{
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001480 struct mtd_info *mtd = nand_to_mtd(&denali->nand);
Boris BREZILLON320092a2015-12-11 15:02:34 +01001481 /*
1482 * Pre-compute DMA buffer size to avoid any problems in case
1483 * nand_release() ever changes in a way that mtd->writesize and
1484 * mtd->oobsize are not reliable after this call.
1485 */
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001486 int bufsize = mtd->writesize + mtd->oobsize;
Boris BREZILLON320092a2015-12-11 15:02:34 +01001487
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001488 nand_release(mtd);
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001489 denali_irq_cleanup(denali->irq, denali);
Boris BREZILLON320092a2015-12-11 15:02:34 +01001490 dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
Masahiro Yamada81254502014-09-16 20:04:25 +09001491 DMA_BIDIRECTIONAL);
Jason Robertsce082592010-05-13 15:57:33 +01001492}
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001493EXPORT_SYMBOL(denali_remove);