/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>

#include "denali.h"

MODULE_LICENSE("GPL");

#define DENALI_NAND_NAME    "denali-nand"

/*
 * indicates whether the internal value for the flash bank is valid
 */
#define CHIP_SELECT_INVALID     -1

#define DENALI_NR_BANKS         4

/*
 * The bus interface clock, clk_x, is phase aligned with the core clock. The
 * clk_x is an integral multiple N of the core clock. The value N is configured
 * at IP delivery time, and its possible values are 4, 5, and 6. We need to
 * align to the largest value to make it work with any possible configuration.
 */
#define DENALI_CLK_X_MULT       6

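/*
 * Illustrative example (not taken from the databook): with a 50 MHz core
 * clock and N = 4, clk_x runs at 200 MHz.  Doing the timing math below with
 * DENALI_CLK_X_MULT = 6 is still safe in that case, because a larger
 * multiplier only makes the derived cycle counts more conservative.
 */
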
/*
 * this helper converts from an MTD structure to our own
 * device context (denali) structure.
 */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
        return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * These constants are defined by the driver to enable common driver
 * configuration options.
 */
#define SPARE_ACCESS            0x41
#define MAIN_ACCESS             0x42
#define MAIN_SPARE_ACCESS       0x43

#define DENALI_READ             0
#define DENALI_WRITE            0x100

/*
 * this is a helper macro that allows us to
 * format the bank into the proper bits for the controller
 */
#define BANK(x) ((x) << 24)

/*
 * Certain operations for the denali NAND controller use an indexed mode to
 * read/write data. The operation is performed by writing the address value
 * of the command to the device memory followed by the data. This function
 * abstracts this common operation.
 */
static void index_addr(struct denali_nand_info *denali,
                       uint32_t address, uint32_t data)
{
        iowrite32(address, denali->flash_mem);
        iowrite32(data, denali->flash_mem + 0x10);
}

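/*
 * Illustrative use of index_addr() (a sketch of the pattern used throughout
 * this file, not an additional interface): a MODE_10 command for bank 0,
 * page 16 could be issued as
 *
 *      index_addr(denali, MODE_10 | BANK(0) | 16, MAIN_ACCESS);
 *
 * i.e. the command/address word goes to flash_mem and the associated data
 * word goes to flash_mem + 0x10.
 */
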
/* Reset the flash controller */
static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
        int i;

        for (i = 0; i < denali->max_banks; i++)
                iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
                          denali->flash_reg + INTR_STATUS(i));

        for (i = 0; i < denali->max_banks; i++) {
                iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
                while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
                        (INTR__RST_COMP | INTR__TIME_OUT)))
                        cpu_relax();
                if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
                        INTR__TIME_OUT)
                        dev_dbg(denali->dev,
                                "NAND Reset operation timed out on bank %d\n", i);
        }

        for (i = 0; i < denali->max_banks; i++)
                iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
                          denali->flash_reg + INTR_STATUS(i));

        return PASS;
}

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void detect_max_banks(struct denali_nand_info *denali)
{
        uint32_t features = ioread32(denali->flash_reg + FEATURES);

        denali->max_banks = 1 << (features & FEATURES__N_BANKS);

        /* the encoding changed from rev 5.0 to 5.1 */
        if (denali->revision < 0x0501)
                denali->max_banks <<= 1;
}

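/*
 * For example (assuming the encoding handled above): a FEATURES__N_BANKS
 * field of 2 means 1 << 2 = 4 banks on rev 5.1 or newer hardware, and the
 * same field value is doubled to 8 banks on older revisions.
 */
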
static void denali_enable_irq(struct denali_nand_info *denali)
{
        int i;

        for (i = 0; i < DENALI_NR_BANKS; i++)
                iowrite32(U32_MAX, denali->flash_reg + INTR_EN(i));
        iowrite32(GLOBAL_INT_EN_FLAG, denali->flash_reg + GLOBAL_INT_ENABLE);
}

static void denali_disable_irq(struct denali_nand_info *denali)
{
        int i;

        for (i = 0; i < DENALI_NR_BANKS; i++)
                iowrite32(0, denali->flash_reg + INTR_EN(i));
        iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
}

static void denali_clear_irq(struct denali_nand_info *denali,
                             int bank, uint32_t irq_status)
{
        /* write one to clear bits */
        iowrite32(irq_status, denali->flash_reg + INTR_STATUS(bank));
}

static void denali_clear_irq_all(struct denali_nand_info *denali)
{
        int i;

        for (i = 0; i < DENALI_NR_BANKS; i++)
                denali_clear_irq(denali, i, U32_MAX);
}

static irqreturn_t denali_isr(int irq, void *dev_id)
{
        struct denali_nand_info *denali = dev_id;
        irqreturn_t ret = IRQ_NONE;
        uint32_t irq_status;
        int i;

        spin_lock(&denali->irq_lock);

        for (i = 0; i < DENALI_NR_BANKS; i++) {
                irq_status = ioread32(denali->flash_reg + INTR_STATUS(i));
                if (irq_status)
                        ret = IRQ_HANDLED;

                denali_clear_irq(denali, i, irq_status);

                if (i != denali->flash_bank)
                        continue;

                denali->irq_status |= irq_status;

                if (denali->irq_status & denali->irq_mask)
                        complete(&denali->complete);
        }

        spin_unlock(&denali->irq_lock);

        return ret;
}

static void denali_reset_irq(struct denali_nand_info *denali)
{
        unsigned long flags;

        spin_lock_irqsave(&denali->irq_lock, flags);
        denali->irq_status = 0;
        denali->irq_mask = 0;
        spin_unlock_irqrestore(&denali->irq_lock, flags);
}

static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
                                    uint32_t irq_mask)
{
        unsigned long time_left, flags;
        uint32_t irq_status;

        spin_lock_irqsave(&denali->irq_lock, flags);

        irq_status = denali->irq_status;

        if (irq_mask & irq_status) {
                /* return immediately if the IRQ has already happened. */
                spin_unlock_irqrestore(&denali->irq_lock, flags);
                return irq_status;
        }

        denali->irq_mask = irq_mask;
        reinit_completion(&denali->complete);
        spin_unlock_irqrestore(&denali->irq_lock, flags);

        time_left = wait_for_completion_timeout(&denali->complete,
                                                msecs_to_jiffies(1000));
        if (!time_left) {
                dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
                        denali->irq_mask);
                return 0;
        }

        return denali->irq_status;
}

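/*
 * Typical usage of the helpers above (a sketch of the pattern used by the
 * callers in this file, not an additional interface):
 *
 *      denali_reset_irq(denali);
 *      ... issue the command or start the DMA transfer ...
 *      irq_status = denali_wait_for_irq(denali, INTR__DMA_CMD_COMP);
 *      if (!(irq_status & INTR__DMA_CMD_COMP))
 *              ... handle the timeout ...
 */
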
static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
        unsigned long flags;
        uint32_t irq_status;

        spin_lock_irqsave(&denali->irq_lock, flags);
        irq_status = denali->irq_status;
        spin_unlock_irqrestore(&denali->irq_lock, flags);

        return irq_status;
}

/*
 * This helper function sets up the registers for ECC and whether or not
 * the spare area will be transferred.
 */
static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
                               bool transfer_spare)
{
        int ecc_en_flag, transfer_spare_flag;

        /* set ECC, transfer spare bits if needed */
        ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
        transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;

        /* Enable spare area/ECC per user's request. */
        iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
        iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
}

static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        int i;

        iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

        for (i = 0; i < len; i++)
                buf[i] = ioread32(denali->flash_mem + 0x10);
}

static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        int i;

        iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

        for (i = 0; i < len; i++)
                iowrite32(buf[i], denali->flash_mem + 0x10);
}

static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint16_t *buf16 = (uint16_t *)buf;
        int i;

        iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

        for (i = 0; i < len / 2; i++)
                buf16[i] = ioread32(denali->flash_mem + 0x10);
}

static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
                               int len)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        const uint16_t *buf16 = (const uint16_t *)buf;
        int i;

        iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);

        for (i = 0; i < len / 2; i++)
                iowrite32(buf16[i], denali->flash_mem + 0x10);
}

static uint8_t denali_read_byte(struct mtd_info *mtd)
{
        uint8_t byte;

        denali_read_buf(mtd, &byte, 1);

        return byte;
}

static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
        denali_write_buf(mtd, &byte, 1);
}

static uint16_t denali_read_word(struct mtd_info *mtd)
{
        uint16_t word;

        denali_read_buf16(mtd, (uint8_t *)&word, 2);

        return word;
}

static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint32_t type;

        if (ctrl & NAND_CLE)
                type = 0;
        else if (ctrl & NAND_ALE)
                type = 1;
        else
                return;

        /*
         * Some commands are followed by chip->dev_ready or chip->waitfunc.
         * irq_status must be cleared here to catch the R/B# interrupt later.
         */
        if (ctrl & NAND_CTRL_CHANGE)
                denali_reset_irq(denali);

        index_addr(denali, MODE_11 | BANK(denali->flash_bank) | type, dat);
}

static int denali_dev_ready(struct mtd_info *mtd)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);

        return !!(denali_check_irq(denali) & INTR__INT_ACT);
}

/*
 * sends a pipeline command operation to the controller. See the Denali NAND
 * controller's user guide for more information (section 4.2.3.6).
 */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
                                    bool ecc_en, bool transfer_spare,
                                    int access_type, int op)
{
        int status = PASS;
        uint32_t addr, cmd;

        setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

        denali_reset_irq(denali);

        addr = BANK(denali->flash_bank) | denali->page;

        if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
                cmd = MODE_01 | addr;
                iowrite32(cmd, denali->flash_mem);
        } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
                /* set up the spare area access */
                cmd = MODE_10 | addr;
                index_addr(denali, cmd, access_type);

                cmd = MODE_01 | addr;
                iowrite32(cmd, denali->flash_mem);
        } else if (op == DENALI_READ) {
                /* setup page read request for access type */
                cmd = MODE_10 | addr;
                index_addr(denali, cmd, access_type);

                cmd = MODE_01 | addr;
                iowrite32(cmd, denali->flash_mem);
        }
        return status;
}

/* helper function that simply writes a buffer to the flash */
static int write_data_to_flash_mem(struct denali_nand_info *denali,
                                   const uint8_t *buf, int len)
{
        uint32_t *buf32;
        int i;

        /*
         * verify that the len is a multiple of 4.
         * see comment in read_data_from_flash_mem()
         */
        BUG_ON((len % 4) != 0);

        /* write the data to the flash memory */
        buf32 = (uint32_t *)buf;
        for (i = 0; i < len / 4; i++)
                iowrite32(*buf32++, denali->flash_mem + 0x10);
        return i * 4; /* intent is to return the number of bytes written */
}

/* helper function that simply reads a buffer from the flash */
static int read_data_from_flash_mem(struct denali_nand_info *denali,
                                    uint8_t *buf, int len)
{
        uint32_t *buf32;
        int i;

        /*
         * we assume that len will be a multiple of 4, if not it would be nice
         * to know about it ASAP rather than have random failures...
         * This assumption is based on the fact that this function is designed
         * to be used to read flash pages, which are typically multiples of 4.
         */
        BUG_ON((len % 4) != 0);

        /* transfer the data from the flash */
        buf32 = (uint32_t *)buf;
        for (i = 0; i < len / 4; i++)
                *buf32++ = ioread32(denali->flash_mem + 0x10);
        return i * 4; /* intent is to return the number of bytes read */
}

/* writes OOB data to the device */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint32_t irq_status;
        uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL;
        int status = 0;

        denali->page = page;

        if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
                                     DENALI_WRITE) == PASS) {
                write_data_to_flash_mem(denali, buf, mtd->oobsize);

                /* wait for operation to complete */
                irq_status = denali_wait_for_irq(denali, irq_mask);

                if (!(irq_status & INTR__PROGRAM_COMP)) {
                        dev_err(denali->dev, "OOB write failed\n");
                        status = -EIO;
                }
        } else {
                dev_err(denali->dev, "unable to send pipeline command\n");
                status = -EIO;
        }
        return status;
}

/* reads OOB data from the device */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint32_t irq_mask = INTR__LOAD_COMP;
        uint32_t irq_status, addr, cmd;

        denali->page = page;

        if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
                                     DENALI_READ) == PASS) {
                read_data_from_flash_mem(denali, buf, mtd->oobsize);

                /*
                 * wait for command to be accepted
                 * can always use status0 bit as the
                 * mask is identical for each bank.
                 */
                irq_status = denali_wait_for_irq(denali, irq_mask);

                if (!(irq_status & INTR__LOAD_COMP))
                        dev_err(denali->dev, "page on OOB timeout %d\n",
                                denali->page);

                /*
                 * We set the device back to MAIN_ACCESS here as I observed
                 * instability with the controller if you do a block erase
                 * and the last transaction was a SPARE_ACCESS. Block erase
                 * is reliable (according to the MTD test infrastructure)
                 * if you are in MAIN_ACCESS.
                 */
                addr = BANK(denali->flash_bank) | denali->page;
                cmd = MODE_10 | addr;
                index_addr(denali, cmd, MAIN_ACCESS);
        }
}

static int denali_check_erased_page(struct mtd_info *mtd,
                                    struct nand_chip *chip, uint8_t *buf,
                                    unsigned long uncor_ecc_flags,
                                    unsigned int max_bitflips)
{
        uint8_t *ecc_code = chip->buffers->ecccode;
        int ecc_steps = chip->ecc.steps;
        int ecc_size = chip->ecc.size;
        int ecc_bytes = chip->ecc.bytes;
        int i, ret, stat;

        ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
                                         chip->ecc.total);
        if (ret)
                return ret;

        for (i = 0; i < ecc_steps; i++) {
                if (!(uncor_ecc_flags & BIT(i)))
                        continue;

                stat = nand_check_erased_ecc_chunk(buf, ecc_size,
                                                   ecc_code, ecc_bytes,
                                                   NULL, 0,
                                                   chip->ecc.strength);
                if (stat < 0) {
                        mtd->ecc_stats.failed++;
                } else {
                        mtd->ecc_stats.corrected += stat;
                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
                }

                buf += ecc_size;
                ecc_code += ecc_bytes;
        }

        return max_bitflips;
}

static int denali_hw_ecc_fixup(struct mtd_info *mtd,
                               struct denali_nand_info *denali,
                               unsigned long *uncor_ecc_flags)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        int bank = denali->flash_bank;
        uint32_t ecc_cor;
        unsigned int max_bitflips;

        ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
        ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

        if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
                /*
                 * This flag is set when an uncorrectable error occurs in at
                 * least one ECC sector.  We can not know "how many sectors"
                 * or "which sector(s)", so we need the erased-page check for
                 * all sectors.
                 */
                *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
                return 0;
        }

        max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;

        /*
         * The register holds the maximum of per-sector corrected bitflips.
         * This is suitable for the return value of the ->read_page() callback.
         * Unfortunately, we can not know the total number of corrected bits in
         * the page.  Increase the stats by max_bitflips. (compromised solution)
         */
        mtd->ecc_stats.corrected += max_bitflips;

        return max_bitflips;
}

#define ECC_SECTOR(x)   (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)     (((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
#define ECC_ERR_DEVICE(x)       (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)         ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)

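/*
 * Rough illustration of how the two error registers are decoded by the
 * macros above (the field positions are only those implied by the shifts
 * and masks used here):
 *
 *   ECC_ERROR_ADDRESS:   [ sector number | byte offset within the sector ]
 *   ERR_CORRECTION_INFO: [ last-error flag | error type | device | value ]
 *
 * ECC_SECTOR() extracts the failing ECC sector and ECC_BYTE() the byte
 * offset inside that sector, which denali_sw_ecc_fixup() below turns into
 * an offset into the page buffer.
 */
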
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
                               struct denali_nand_info *denali,
                               unsigned long *uncor_ecc_flags, uint8_t *buf)
{
        unsigned int ecc_size = denali->nand.ecc.size;
        unsigned int bitflips = 0;
        unsigned int max_bitflips = 0;
        uint32_t err_addr, err_cor_info;
        unsigned int err_byte, err_sector, err_device;
        uint8_t err_cor_value;
        unsigned int prev_sector = 0;
        uint32_t irq_status;

        denali_reset_irq(denali);

        do {
                err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
                err_sector = ECC_SECTOR(err_addr);
                err_byte = ECC_BYTE(err_addr);

                err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
                err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
                err_device = ECC_ERR_DEVICE(err_cor_info);

                /* reset the bitflip counter when crossing ECC sector */
                if (err_sector != prev_sector)
                        bitflips = 0;

                if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
                        /*
                         * Check later if this is a real ECC error, or
                         * an erased sector.
                         */
                        *uncor_ecc_flags |= BIT(err_sector);
                } else if (err_byte < ecc_size) {
                        /*
                         * If err_byte is larger than ecc_size, the error
                         * happened in the OOB area, so we ignore it; there is
                         * no need to correct it.  err_device identifies which
                         * NAND device the error bits occurred in when more
                         * than one NAND is connected.
                         */
                        int offset;
                        unsigned int flips_in_byte;

                        offset = (err_sector * ecc_size + err_byte) *
                                        denali->devnum + err_device;

                        /* correct the ECC error */
                        flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
                        buf[offset] ^= err_cor_value;
                        mtd->ecc_stats.corrected += flips_in_byte;
                        bitflips += flips_in_byte;

                        max_bitflips = max(max_bitflips, bitflips);
                }

                prev_sector = err_sector;
        } while (!ECC_LAST_ERR(err_cor_info));

        /*
         * Once all ECC errors have been handled, the controller triggers an
         * ECC_TRANSACTION_DONE interrupt, so just wait for it here.
         */
        irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
        if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
                return -EIO;

        return max_bitflips;
}

/* programs the controller to either enable/disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
        iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
        ioread32(denali->flash_reg + DMA_ENABLE);
}

static void denali_setup_dma64(struct denali_nand_info *denali, int op)
{
        uint32_t mode;
        const int page_count = 1;
        uint64_t addr = denali->buf.dma_buf;

        mode = MODE_10 | BANK(denali->flash_bank) | denali->page;

        /* DMA is a three step process */

        /*
         * 1. setup transfer type, interrupt when complete,
         *    burst len = 64 bytes, the number of pages
         */
        index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);

        /* 2. set memory low address */
        index_addr(denali, mode, addr);

        /* 3. set memory high address */
        index_addr(denali, mode, addr >> 32);
}

static void denali_setup_dma32(struct denali_nand_info *denali, int op)
{
        uint32_t mode;
        const int page_count = 1;
        uint32_t addr = denali->buf.dma_buf;

        mode = MODE_10 | BANK(denali->flash_bank);

        /* DMA is a four step process */

        /* 1. setup transfer type and # of pages */
        index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

        /* 2. set memory high address bits 23:8 */
        index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);

        /* 3. set memory low address bits 23:8 */
        index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);

        /* 4. interrupt when complete, burst len = 64 bytes */
        index_addr(denali, mode | 0x14000, 0x2400);
}

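/*
 * Worked example for the 32-bit setup above (purely illustrative numbers):
 * with a DMA buffer at physical address 0x12345678, step 2 programs the
 * upper half (0x1234) via index_addr(denali, mode | (0x1234 << 8), 0x2200)
 * and step 3 programs the lower half (0x5678) via
 * index_addr(denali, mode | (0x5678 << 8), 0x2300).
 */
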
static void denali_setup_dma(struct denali_nand_info *denali, int op)
{
        if (denali->caps & DENALI_CAP_DMA_64BIT)
                denali_setup_dma64(denali, op);
        else
                denali_setup_dma32(denali, op);
}

/*
 * writes a page. user specifies type, and this function handles the
 * configuration details.
 */
static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
                      const uint8_t *buf, int page, bool raw_xfer)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        dma_addr_t addr = denali->buf.dma_buf;
        size_t size = mtd->writesize + mtd->oobsize;
        uint32_t irq_status;
        uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
        int ret = 0;

        denali->page = page;

        /*
         * if it is a raw xfer, we want to disable ecc and send the spare area.
         * !raw_xfer - enable ecc
         * raw_xfer - transfer spare
         */
        setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

        /* copy buffer into DMA buffer */
        memcpy(denali->buf.buf, buf, mtd->writesize);

        if (raw_xfer) {
                /* transfer the data to the spare area */
                memcpy(denali->buf.buf + mtd->writesize,
                       chip->oob_poi, mtd->oobsize);
        }

        dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);

        denali_reset_irq(denali);
        denali_enable_dma(denali, true);

        denali_setup_dma(denali, DENALI_WRITE);

        /* wait for operation to complete */
        irq_status = denali_wait_for_irq(denali, irq_mask);
        if (!(irq_status & INTR__DMA_CMD_COMP)) {
                dev_err(denali->dev, "timeout on write_page (type = %d)\n",
                        raw_xfer);
                ret = -EIO;
        }

        denali_enable_dma(denali, false);
        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);

        return ret;
}

/* NAND core entry points */

/*
 * this is the callback that the NAND core calls to write a page. Since
 * writing a page with ECC or without is similar, all the work is done
 * by write_page above.
 */
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                             const uint8_t *buf, int oob_required, int page)
{
        /*
         * for regular page writes, we let HW handle all the ECC
         * data written to the device.
         */
        return write_page(mtd, chip, buf, page, false);
}

/*
 * This is the callback that the NAND core calls to write a page without ECC.
 * raw access is similar to ECC page writes, so all the work is done in the
 * write_page() function above.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
                                 const uint8_t *buf, int oob_required,
                                 int page)
{
        /*
         * for raw page writes, we want to disable ECC and simply write
         * whatever data is in the buffer.
         */
        return write_page(mtd, chip, buf, page, true);
}

static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
                            int page)
{
        return write_oob_data(mtd, chip->oob_poi, page);
}

static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                           int page)
{
        read_oob_data(mtd, chip->oob_poi, page);

        return 0;
}

static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                            uint8_t *buf, int oob_required, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        dma_addr_t addr = denali->buf.dma_buf;
        size_t size = mtd->writesize + mtd->oobsize;
        uint32_t irq_status;
        uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ?
                                INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR :
                                INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR;
        unsigned long uncor_ecc_flags = 0;
        int stat = 0;

        denali->page = page;

        setup_ecc_for_xfer(denali, true, false);

        denali_enable_dma(denali, true);
        dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

        denali_reset_irq(denali);
        denali_setup_dma(denali, DENALI_READ);

        /* wait for operation to complete */
        irq_status = denali_wait_for_irq(denali, irq_mask);

        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

        memcpy(buf, denali->buf.buf, mtd->writesize);

        if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
                stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
        else if (irq_status & INTR__ECC_ERR)
                stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
        denali_enable_dma(denali, false);

        if (stat < 0)
                return stat;

        if (uncor_ecc_flags) {
                read_oob_data(mtd, chip->oob_poi, denali->page);

                stat = denali_check_erased_page(mtd, chip, buf,
                                                uncor_ecc_flags, stat);
        }

        return stat;
}

static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
                                uint8_t *buf, int oob_required, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        dma_addr_t addr = denali->buf.dma_buf;
        size_t size = mtd->writesize + mtd->oobsize;
        uint32_t irq_mask = INTR__DMA_CMD_COMP;
        uint32_t irq_status;

        denali->page = page;

        setup_ecc_for_xfer(denali, false, true);
        denali_enable_dma(denali, true);

        dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

        denali_reset_irq(denali);
        denali_setup_dma(denali, DENALI_READ);

        /* wait for operation to complete */
        irq_status = denali_wait_for_irq(denali, irq_mask);
        if (!(irq_status & INTR__DMA_CMD_COMP))
                return -ETIMEDOUT;

        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

        denali_enable_dma(denali, false);

        memcpy(buf, denali->buf.buf, mtd->writesize);
        memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

        return 0;
}

static void denali_select_chip(struct mtd_info *mtd, int chip)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);

        denali->flash_bank = chip;
}

static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint32_t irq_status;

        /* R/B# pin transitioned from low to high? */
        irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);

        return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}

static int denali_erase(struct mtd_info *mtd, int page)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        uint32_t cmd, irq_status;

        denali_reset_irq(denali);

        /* setup the erase request for the block containing this page */
        cmd = MODE_10 | BANK(denali->flash_bank) | page;
        index_addr(denali, cmd, 0x1);

        /* wait for erase to complete or failure to occur */
        irq_status = denali_wait_for_irq(denali,
                                         INTR__ERASE_COMP | INTR__ERASE_FAIL);

        return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
}

#define DIV_ROUND_DOWN_ULL(ll, d) \
        ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
                                       const struct nand_data_interface *conf)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        const struct nand_sdr_timings *timings;
        unsigned long t_clk;
        int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
        int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
        int addr_2_data_mask;
        uint32_t tmp;

        timings = nand_get_sdr_timings(conf);
        if (IS_ERR(timings))
                return PTR_ERR(timings);

        /* clk_x period in picoseconds */
        t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
        if (!t_clk)
                return -EINVAL;

        if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
                return 0;

        /* tREA -> ACC_CLKS */
        acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
        acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

        tmp = ioread32(denali->flash_reg + ACC_CLKS);
        tmp &= ~ACC_CLKS__VALUE;
        tmp |= acc_clks;
        iowrite32(tmp, denali->flash_reg + ACC_CLKS);

        /* tRHW -> RE_2_WE */
        re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
        re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

        tmp = ioread32(denali->flash_reg + RE_2_WE);
        tmp &= ~RE_2_WE__VALUE;
        tmp |= re_2_we;
        iowrite32(tmp, denali->flash_reg + RE_2_WE);

        /* tRHZ -> RE_2_RE */
        re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
        re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

        tmp = ioread32(denali->flash_reg + RE_2_RE);
        tmp &= ~RE_2_RE__VALUE;
        tmp |= re_2_re;
        iowrite32(tmp, denali->flash_reg + RE_2_RE);

        /* tWHR -> WE_2_RE */
        we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
        we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

        tmp = ioread32(denali->flash_reg + TWHR2_AND_WE_2_RE);
        tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
        tmp |= we_2_re;
        iowrite32(tmp, denali->flash_reg + TWHR2_AND_WE_2_RE);

        /* tADL -> ADDR_2_DATA */

        /* for older versions, ADDR_2_DATA is only 6 bit wide */
        addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
        if (denali->revision < 0x0501)
                addr_2_data_mask >>= 1;

        addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
        addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

        tmp = ioread32(denali->flash_reg + TCWAW_AND_ADDR_2_DATA);
        tmp &= ~addr_2_data_mask;
        tmp |= addr_2_data;
        iowrite32(tmp, denali->flash_reg + TCWAW_AND_ADDR_2_DATA);

        /* tREH, tWH -> RDWR_EN_HI_CNT */
        rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
                                  t_clk);
        rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

        tmp = ioread32(denali->flash_reg + RDWR_EN_HI_CNT);
        tmp &= ~RDWR_EN_HI_CNT__VALUE;
        tmp |= rdwr_en_hi;
        iowrite32(tmp, denali->flash_reg + RDWR_EN_HI_CNT);

        /* tRP, tWP -> RDWR_EN_LO_CNT */
        rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
                                  t_clk);
        rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
                                     t_clk);
        rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
        rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
        rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

        tmp = ioread32(denali->flash_reg + RDWR_EN_LO_CNT);
        tmp &= ~RDWR_EN_LO_CNT__VALUE;
        tmp |= rdwr_en_lo;
        iowrite32(tmp, denali->flash_reg + RDWR_EN_LO_CNT);

        /* tCS, tCEA -> CS_SETUP_CNT */
        cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
                        (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
                        0);
        cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

        tmp = ioread32(denali->flash_reg + CS_SETUP_CNT);
        tmp &= ~CS_SETUP_CNT__VALUE;
        tmp |= cs_setup;
        iowrite32(tmp, denali->flash_reg + CS_SETUP_CNT);

        return 0;
}

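/*
 * Worked example (illustrative numbers only): with clk_x at 200 MHz,
 * t_clk = 5000 ps.  A chip requiring tREA_max = 20000 ps then needs
 * DIV_ROUND_UP(20000, 5000) = 4 ACC_CLKS cycles, which is what the function
 * above programs before clamping to the register field width.
 */
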
/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
        /*
         * The REVISION register may not be reliable. Platforms are allowed to
         * override it.
         */
        if (!denali->revision)
                denali->revision =
                                swab16(ioread32(denali->flash_reg + REVISION));

        /*
         * tell the driver how many bytes the controller will skip at the
         * start of the OOB area before writing ECC code.  This register may
         * already be set by firmware, so we read the value out; if it is 0,
         * just leave it alone.
         */
        denali->bbtskipbytes = ioread32(denali->flash_reg +
                                                SPARE_AREA_SKIP_BYTES);
        detect_max_banks(denali);
        denali_nand_reset(denali);
        iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
        iowrite32(CHIP_EN_DONT_CARE__FLAG,
                  denali->flash_reg + CHIP_ENABLE_DONT_CARE);

        iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

        /* these registers should be set at init time */
        iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
        iowrite32(1, denali->flash_reg + ECC_ENABLE);
}

int denali_calc_ecc_bytes(int step_size, int strength)
{
        /* BCH code.  Denali requires ecc.bytes to be multiple of 2 */
        return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);

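/*
 * Example calculation (illustrative): for a 512-byte ECC step and strength 8,
 * fls(512 * 8) = 13, so the result is DIV_ROUND_UP(8 * 13, 16) * 2 =
 * 7 * 2 = 14 ECC bytes per step.
 */
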
static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
                            struct denali_nand_info *denali)
{
        int oobavail = mtd->oobsize - denali->bbtskipbytes;
        int ret;

        /*
         * If .size and .strength are already set (usually by DT),
         * check if they are supported by this controller.
         */
        if (chip->ecc.size && chip->ecc.strength)
                return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);

        /*
         * We want .size and .strength closest to the chip's requirement
         * unless NAND_ECC_MAXIMIZE is requested.
         */
        if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
                ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
                if (!ret)
                        return 0;
        }

        /* Max ECC strength is the last thing we can do */
        return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
}

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
                                struct mtd_oob_region *oobregion)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        struct nand_chip *chip = mtd_to_nand(mtd);

        if (section)
                return -ERANGE;

        oobregion->offset = denali->bbtskipbytes;
        oobregion->length = chip->ecc.total;

        return 0;
}

static int denali_ooblayout_free(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
{
        struct denali_nand_info *denali = mtd_to_denali(mtd);
        struct nand_chip *chip = mtd_to_nand(mtd);

        if (section)
                return -ERANGE;

        oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
        oobregion->length = mtd->oobsize - oobregion->offset;

        return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
        .ecc = denali_ooblayout_ecc,
        .free = denali_ooblayout_free,
};

static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
        .offs = 8,
        .len = 4,
        .veroffs = 12,
        .maxblocks = 4,
        .pattern = bbt_pattern,
};

static struct nand_bbt_descr bbt_mirror_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
        .offs = 8,
        .len = 4,
        .veroffs = 12,
        .maxblocks = 4,
        .pattern = mirror_pattern,
};

/* initialize driver data structures */
static void denali_drv_init(struct denali_nand_info *denali)
{
        /*
         * the completion object will be used to notify
         * the waiter that the interrupt has arrived
         */
        init_completion(&denali->complete);

        /*
         * the spinlock will be used to synchronize the ISR with any
         * element that might access shared data (interrupt status)
         */
        spin_lock_init(&denali->irq_lock);

        /* indicate that MTD has not selected a valid bank yet */
        denali->flash_bank = CHIP_SELECT_INVALID;
}

Masahiro Yamadae93c1642017-03-23 05:07:21 +09001199static int denali_multidev_fixup(struct denali_nand_info *denali)
Masahiro Yamada6da27b42017-03-23 05:07:20 +09001200{
1201 struct nand_chip *chip = &denali->nand;
1202 struct mtd_info *mtd = nand_to_mtd(chip);
1203
1204 /*
1205 * Support for multi device:
1206 * When the IP configuration is x16 capable and two x8 chips are
1207 * connected in parallel, DEVICES_CONNECTED should be set to 2.
1208 * In this case, the core framework knows nothing about this fact,
1209 * so we should tell it the _logical_ pagesize and anything necessary.
1210 */
1211 denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
1212
Masahiro Yamadacc5d8032017-03-23 05:07:22 +09001213 /*
1214 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
1215 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
1216 */
1217 if (denali->devnum == 0) {
1218 denali->devnum = 1;
1219 iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
1220 }
1221
Masahiro Yamadae93c1642017-03-23 05:07:21 +09001222 if (denali->devnum == 1)
1223 return 0;
1224
1225 if (denali->devnum != 2) {
1226 dev_err(denali->dev, "unsupported number of devices %d\n",
1227 denali->devnum);
1228 return -EINVAL;
1229 }
1230
1231 /* 2 chips in parallel */
1232 mtd->size <<= 1;
1233 mtd->erasesize <<= 1;
1234 mtd->writesize <<= 1;
1235 mtd->oobsize <<= 1;
1236 chip->chipsize <<= 1;
1237 chip->page_shift += 1;
1238 chip->phys_erase_shift += 1;
1239 chip->bbt_erase_shift += 1;
1240 chip->chip_shift += 1;
1241 chip->pagemask <<= 1;
1242 chip->ecc.size <<= 1;
1243 chip->ecc.bytes <<= 1;
1244 chip->ecc.strength <<= 1;
1245 denali->bbtskipbytes <<= 1;
1246
1247 return 0;
Masahiro Yamada6da27b42017-03-23 05:07:20 +09001248}
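
/*
 * Worked example (hypothetical geometry, for illustration only): with
 * two x8 chips of 2KiB pages, 64-byte OOB and 128KiB blocks wired in
 * parallel, the fixup above presents a single logical device with 4KiB
 * pages, 128 bytes of OOB and 256KiB erase blocks, and doubles the ECC
 * step size, byte count and strength to match.
 */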

int denali_init(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	mtd->dev.parent = denali->dev;
	denali_hw_init(denali);
	denali_drv_init(denali);

	denali_clear_irq_all(denali);

	/* Request IRQ after all the hardware initialization is finished */
	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
			       IRQF_SHARED, DENALI_NAND_NAME, denali);
	if (ret) {
		dev_err(denali->dev, "Unable to request IRQ\n");
		return ret;
	}

	denali_enable_irq(denali);

	nand_set_flash_node(chip, denali->dev->of_node);
	/* Fall back to the default name if DT did not give a "label" property */
	if (!mtd->name)
		mtd->name = "denali-nand";

	/* register the driver with the NAND core subsystem */
	chip->select_chip = denali_select_chip;
	chip->read_byte = denali_read_byte;
	chip->write_byte = denali_write_byte;
	chip->read_word = denali_read_word;
	chip->cmd_ctrl = denali_cmd_ctrl;
	chip->dev_ready = denali_dev_ready;
	chip->waitfunc = denali_waitfunc;

	/* clk rate info is needed for setup_data_interface */
	if (denali->clk_x_rate)
		chip->setup_data_interface = denali_setup_data_interface;

	/*
	 * Scan for NAND devices attached to the controller. This is the
	 * first stage in a two-step process to register with the NAND
	 * subsystem.
	 */
	ret = nand_scan_ident(mtd, denali->max_banks, NULL);
	if (ret)
		goto disable_irq;

	denali->buf.buf = devm_kzalloc(denali->dev,
				       mtd->writesize + mtd->oobsize,
				       GFP_KERNEL);
	if (!denali->buf.buf) {
		ret = -ENOMEM;
		goto disable_irq;
	}

	ret = dma_set_mask(denali->dev,
			   DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ?
					64 : 32));
	if (ret) {
		dev_err(denali->dev, "No usable DMA configuration\n");
		goto disable_irq;
	}

	denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
					     mtd->writesize + mtd->oobsize,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
		dev_err(denali->dev, "Failed to map DMA buffer\n");
		ret = -EIO;
		goto disable_irq;
	}
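
	/*
	 * The mapping above sets up the driver's single bounce buffer: it
	 * covers one page plus its OOB area and is mapped bidirectionally
	 * so the same buffer can serve DMA in both directions; it is only
	 * unmapped again in denali_remove(). The dma_set_mask() call just
	 * before it requests a 64-bit mask when the IP was configured with
	 * 64-bit DMA support (DENALI_CAP_DMA_64BIT) and a 32-bit mask
	 * otherwise.
	 */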

	/*
	 * second stage of the NAND scan
	 * this stage requires information regarding ECC and
	 * bad block management.
	 */

	/* Bad block management */
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;

	/* skip the scan for now until we have OOB read and write support */
	chip->bbt_options |= NAND_BBT_USE_FLASH;
	chip->options |= NAND_SKIP_BBTSCAN;
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;

	/* no subpage writes on denali */
	chip->options |= NAND_NO_SUBPAGE_WRITE;

	ret = denali_ecc_setup(mtd, chip, denali);
	if (ret) {
		dev_err(denali->dev, "Failed to setup ECC settings.\n");
		goto disable_irq;
	}

	dev_dbg(denali->dev,
		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);

	iowrite32(chip->ecc.strength, denali->flash_reg + ECC_CORRECTION);
	iowrite32(mtd->erasesize / mtd->writesize,
		  denali->flash_reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->flash_reg + DEVICE_WIDTH);
	iowrite32(mtd->writesize, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);

	iowrite32(chip->ecc.size, denali->flash_reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->flash_reg + CFG_LAST_DATA_BLOCK_SIZE);
	/* chip->ecc.steps is set by nand_scan_tail(); not available here */
	iowrite32(mtd->writesize / chip->ecc.size,
		  denali->flash_reg + CFG_NUM_DATA_BLOCKS);
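
	/*
	 * Example of the values programmed above (assuming, purely for
	 * illustration, a 2KiB page / 64-byte OOB / 128KiB block device
	 * with 512-byte ECC steps): PAGES_PER_BLOCK = 64,
	 * DEVICE_MAIN_AREA_SIZE = 2048, DEVICE_SPARE_AREA_SIZE = 64,
	 * CFG_DATA_BLOCK_SIZE = 512 and CFG_NUM_DATA_BLOCKS = 4.
	 */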

	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);

	if (chip->options & NAND_BUSWIDTH_16) {
		chip->read_buf = denali_read_buf16;
		chip->write_buf = denali_write_buf16;
	} else {
		chip->read_buf = denali_read_buf;
		chip->write_buf = denali_write_buf;
	}
	chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
	chip->ecc.read_oob = denali_read_oob;
	chip->ecc.write_oob = denali_write_oob;
	chip->erase = denali_erase;

	ret = denali_multidev_fixup(denali);
	if (ret)
		goto disable_irq;

	ret = nand_scan_tail(mtd);
	if (ret)
		goto disable_irq;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
		goto disable_irq;
	}
	return 0;

disable_irq:
	denali_disable_irq(denali);

	return ret;
}
EXPORT_SYMBOL(denali_init);

/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
	/*
	 * Pre-compute DMA buffer size to avoid any problems in case
	 * nand_release() ever changes in a way that mtd->writesize and
	 * mtd->oobsize are not reliable after this call.
	 */
	int bufsize = mtd->writesize + mtd->oobsize;

	nand_release(mtd);
	denali_disable_irq(denali);
	dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
			 DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(denali_remove);
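
/*
 * Typical use (a simplified sketch, not the authoritative sequence): a
 * bus glue driver such as denali_dt.c or denali_pci.c allocates struct
 * denali_nand_info, fills in the resources this file relies on and then
 * calls denali_init() from its probe path and denali_remove() from its
 * remove path:
 *
 *	denali = devm_kzalloc(&pdev->dev, sizeof(*denali), GFP_KERNEL);
 *	denali->dev = &pdev->dev;
 *	denali->irq = platform_get_irq(pdev, 0);
 *	denali->flash_reg = devm_ioremap_resource(&pdev->dev, res);
 *	... set clk_x_rate, caps, etc. as the platform requires ...
 *	ret = denali_init(denali);
 *
 * Error handling and the remaining fields are omitted here; see the real
 * glue drivers for the exact requirements.
 */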