blob: 4ba8ad610381766f7263f296590e363d81952049 [file] [log] [blame]
Jason Robertsce082592010-05-13 15:57:33 +01001/*
2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
Jason Robertsce082592010-05-13 15:57:33 +010019#include <linux/interrupt.h>
20#include <linux/delay.h>
Jamie Iles84457942011-05-06 15:28:55 +010021#include <linux/dma-mapping.h>
Jason Robertsce082592010-05-13 15:57:33 +010022#include <linux/wait.h>
23#include <linux/mutex.h>
Jason Robertsce082592010-05-13 15:57:33 +010024#include <linux/mtd/mtd.h>
25#include <linux/module.h>
26
27#include "denali.h"
28
29MODULE_LICENSE("GPL");
30
Jason Robertsce082592010-05-13 15:57:33 +010031#define DENALI_NAND_NAME "denali-nand"
32
Masahiro Yamada43914a22014-09-09 11:01:51 +090033/*
Masahiro Yamada43914a22014-09-09 11:01:51 +090034 * indicates whether or not the internal value for the flash bank is
35 * valid or not
36 */
Chuanxiao5bac3acf2010-08-05 23:06:04 +080037#define CHIP_SELECT_INVALID -1
Jason Robertsce082592010-05-13 15:57:33 +010038
Masahiro Yamadac19e31d2017-06-13 22:45:38 +090039#define DENALI_NR_BANKS 4
40
Masahiro Yamada43914a22014-09-09 11:01:51 +090041/*
Masahiro Yamada1bb88662017-06-13 22:45:37 +090042 * The bus interface clock, clk_x, is phase aligned with the core clock. The
43 * clk_x is an integral multiple N of the core clk. The value N is configured
44 * at IP delivery time, and its available value is 4, 5, or 6. We need to align
45 * to the largest value to make it work with any possible configuration.
Masahiro Yamada43914a22014-09-09 11:01:51 +090046 */
Masahiro Yamada1bb88662017-06-13 22:45:37 +090047#define DENALI_CLK_X_MULT 6
Jason Robertsce082592010-05-13 15:57:33 +010048
Masahiro Yamada43914a22014-09-09 11:01:51 +090049/*
50 * this macro allows us to convert from an MTD structure to our own
Jason Robertsce082592010-05-13 15:57:33 +010051 * device context (denali) structure.
52 */
Boris BREZILLON442f201b2015-12-11 15:06:00 +010053static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
54{
55 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
56}
Jason Robertsce082592010-05-13 15:57:33 +010057
Masahiro Yamada43914a22014-09-09 11:01:51 +090058/*
59 * These constants are defined by the driver to enable common driver
60 * configuration options.
61 */
Jason Robertsce082592010-05-13 15:57:33 +010062#define SPARE_ACCESS 0x41
63#define MAIN_ACCESS 0x42
64#define MAIN_SPARE_ACCESS 0x43
65
66#define DENALI_READ 0
67#define DENALI_WRITE 0x100
68
Masahiro Yamada43914a22014-09-09 11:01:51 +090069/*
70 * this is a helper macro that allows us to
71 * format the bank into the proper bits for the controller
72 */
Jason Robertsce082592010-05-13 15:57:33 +010073#define BANK(x) ((x) << 24)
74
Masahiro Yamada43914a22014-09-09 11:01:51 +090075/*
76 * Certain operations for the denali NAND controller use an indexed mode to
77 * read/write data. The operation is performed by writing the address value
78 * of the command to the device memory followed by the data. This function
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +080079 * abstracts this common operation.
Masahiro Yamada43914a22014-09-09 11:01:51 +090080 */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +080081static void index_addr(struct denali_nand_info *denali,
82 uint32_t address, uint32_t data)
Jason Robertsce082592010-05-13 15:57:33 +010083{
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +080084 iowrite32(address, denali->flash_mem);
85 iowrite32(data, denali->flash_mem + 0x10);
Jason Robertsce082592010-05-13 15:57:33 +010086}
87
Masahiro Yamada43914a22014-09-09 11:01:51 +090088/*
Jamie Ilesc89eeda2011-05-06 15:28:57 +010089 * Use the configuration feature register to determine the maximum number of
90 * banks that the hardware supports.
91 */
92static void detect_max_banks(struct denali_nand_info *denali)
93{
94 uint32_t features = ioread32(denali->flash_reg + FEATURES);
95
Masahiro Yamadae7beeee2017-03-30 15:45:57 +090096 denali->max_banks = 1 << (features & FEATURES__N_BANKS);
97
98 /* the encoding changed from rev 5.0 to 5.1 */
99 if (denali->revision < 0x0501)
100 denali->max_banks <<= 1;
Jamie Ilesc89eeda2011-05-06 15:28:57 +0100101}
102
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900103static void denali_enable_irq(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +0100104{
Jamie Iles9589bf52011-05-06 15:28:56 +0100105 int i;
106
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900107 for (i = 0; i < DENALI_NR_BANKS; i++)
108 iowrite32(U32_MAX, denali->flash_reg + INTR_EN(i));
109 iowrite32(GLOBAL_INT_EN_FLAG, denali->flash_reg + GLOBAL_INT_ENABLE);
Jason Robertsce082592010-05-13 15:57:33 +0100110}
111
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900112static void denali_disable_irq(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +0100113{
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900114 int i;
115
116 for (i = 0; i < DENALI_NR_BANKS; i++)
117 iowrite32(0, denali->flash_reg + INTR_EN(i));
118 iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
Jason Robertsce082592010-05-13 15:57:33 +0100119}
120
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900121static void denali_clear_irq(struct denali_nand_info *denali,
122 int bank, uint32_t irq_status)
Jason Robertsce082592010-05-13 15:57:33 +0100123{
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900124 /* write one to clear bits */
125 iowrite32(irq_status, denali->flash_reg + INTR_STATUS(bank));
Jason Robertsce082592010-05-13 15:57:33 +0100126}
127
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900128static void denali_clear_irq_all(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +0100129{
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900130 int i;
Masahiro Yamada5637b692014-09-09 11:01:52 +0900131
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900132 for (i = 0; i < DENALI_NR_BANKS; i++)
133 denali_clear_irq(denali, i, U32_MAX);
Jason Robertsce082592010-05-13 15:57:33 +0100134}
135
Jason Robertsce082592010-05-13 15:57:33 +0100136static irqreturn_t denali_isr(int irq, void *dev_id)
137{
138 struct denali_nand_info *denali = dev_id;
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900139 irqreturn_t ret = IRQ_NONE;
Masahiro Yamada5637b692014-09-09 11:01:52 +0900140 uint32_t irq_status;
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900141 int i;
Jason Robertsce082592010-05-13 15:57:33 +0100142
143 spin_lock(&denali->irq_lock);
144
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900145 for (i = 0; i < DENALI_NR_BANKS; i++) {
146 irq_status = ioread32(denali->flash_reg + INTR_STATUS(i));
147 if (irq_status)
148 ret = IRQ_HANDLED;
149
150 denali_clear_irq(denali, i, irq_status);
151
152 if (i != denali->flash_bank)
153 continue;
154
155 denali->irq_status |= irq_status;
156
157 if (denali->irq_status & denali->irq_mask)
Jason Robertsce082592010-05-13 15:57:33 +0100158 complete(&denali->complete);
Jason Robertsce082592010-05-13 15:57:33 +0100159 }
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900160
Jason Robertsce082592010-05-13 15:57:33 +0100161 spin_unlock(&denali->irq_lock);
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900162
163 return ret;
Jason Robertsce082592010-05-13 15:57:33 +0100164}
Jason Robertsce082592010-05-13 15:57:33 +0100165
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900166static void denali_reset_irq(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +0100167{
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900168 unsigned long flags;
Jason Robertsce082592010-05-13 15:57:33 +0100169
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900170 spin_lock_irqsave(&denali->irq_lock, flags);
171 denali->irq_status = 0;
172 denali->irq_mask = 0;
173 spin_unlock_irqrestore(&denali->irq_lock, flags);
174}
Jason Robertsce082592010-05-13 15:57:33 +0100175
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900176static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
177 uint32_t irq_mask)
178{
179 unsigned long time_left, flags;
180 uint32_t irq_status;
Masahiro Yamada81254502014-09-16 20:04:25 +0900181
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900182 spin_lock_irqsave(&denali->irq_lock, flags);
Jason Robertsce082592010-05-13 15:57:33 +0100183
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900184 irq_status = denali->irq_status;
Jason Robertsce082592010-05-13 15:57:33 +0100185
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900186 if (irq_mask & irq_status) {
187 /* return immediately if the IRQ has already happened. */
188 spin_unlock_irqrestore(&denali->irq_lock, flags);
189 return irq_status;
Jason Robertsce082592010-05-13 15:57:33 +0100190 }
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900191
192 denali->irq_mask = irq_mask;
193 reinit_completion(&denali->complete);
194 spin_unlock_irqrestore(&denali->irq_lock, flags);
195
196 time_left = wait_for_completion_timeout(&denali->complete,
197 msecs_to_jiffies(1000));
198 if (!time_left) {
199 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
200 denali->irq_mask);
201 return 0;
202 }
203
204 return denali->irq_status;
205}
206
Masahiro Yamadafa6134e2017-06-13 22:45:39 +0900207static uint32_t denali_check_irq(struct denali_nand_info *denali)
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900208{
Masahiro Yamadafa6134e2017-06-13 22:45:39 +0900209 unsigned long flags;
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900210 uint32_t irq_status;
211
Masahiro Yamadafa6134e2017-06-13 22:45:39 +0900212 spin_lock_irqsave(&denali->irq_lock, flags);
213 irq_status = denali->irq_status;
214 spin_unlock_irqrestore(&denali->irq_lock, flags);
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900215
Masahiro Yamadafa6134e2017-06-13 22:45:39 +0900216 return irq_status;
Jason Robertsce082592010-05-13 15:57:33 +0100217}
218
Masahiro Yamada43914a22014-09-09 11:01:51 +0900219/*
220 * This helper function setups the registers for ECC and whether or not
221 * the spare area will be transferred.
222 */
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800223static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
Jason Robertsce082592010-05-13 15:57:33 +0100224 bool transfer_spare)
225{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900226 int ecc_en_flag, transfer_spare_flag;
Jason Robertsce082592010-05-13 15:57:33 +0100227
228 /* set ECC, transfer spare bits if needed */
229 ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
230 transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
231
232 /* Enable spare area/ECC per user's request. */
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800233 iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
Masahiro Yamada81254502014-09-16 20:04:25 +0900234 iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
Jason Robertsce082592010-05-13 15:57:33 +0100235}
236
Masahiro Yamadafa6134e2017-06-13 22:45:39 +0900237static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
238{
239 struct denali_nand_info *denali = mtd_to_denali(mtd);
240 int i;
241
242 iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);
243
244 for (i = 0; i < len; i++)
245 buf[i] = ioread32(denali->flash_mem + 0x10);
246}
247
248static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
249{
250 struct denali_nand_info *denali = mtd_to_denali(mtd);
251 int i;
252
253 iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);
254
255 for (i = 0; i < len; i++)
256 iowrite32(buf[i], denali->flash_mem + 0x10);
257}
258
259static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
260{
261 struct denali_nand_info *denali = mtd_to_denali(mtd);
262 uint16_t *buf16 = (uint16_t *)buf;
263 int i;
264
265 iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);
266
267 for (i = 0; i < len / 2; i++)
268 buf16[i] = ioread32(denali->flash_mem + 0x10);
269}
270
271static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
272 int len)
273{
274 struct denali_nand_info *denali = mtd_to_denali(mtd);
275 const uint16_t *buf16 = (const uint16_t *)buf;
276 int i;
277
278 iowrite32(MODE_11 | BANK(denali->flash_bank) | 2, denali->flash_mem);
279
280 for (i = 0; i < len / 2; i++)
281 iowrite32(buf16[i], denali->flash_mem + 0x10);
282}
283
284static uint8_t denali_read_byte(struct mtd_info *mtd)
285{
286 uint8_t byte;
287
288 denali_read_buf(mtd, &byte, 1);
289
290 return byte;
291}
292
293static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
294{
295 denali_write_buf(mtd, &byte, 1);
296}
297
298static uint16_t denali_read_word(struct mtd_info *mtd)
299{
300 uint16_t word;
301
302 denali_read_buf16(mtd, (uint8_t *)&word, 2);
303
304 return word;
305}
306
307static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
308{
309 struct denali_nand_info *denali = mtd_to_denali(mtd);
310 uint32_t type;
311
312 if (ctrl & NAND_CLE)
313 type = 0;
314 else if (ctrl & NAND_ALE)
315 type = 1;
316 else
317 return;
318
319 /*
320 * Some commands are followed by chip->dev_ready or chip->waitfunc.
321 * irq_status must be cleared here to catch the R/B# interrupt later.
322 */
323 if (ctrl & NAND_CTRL_CHANGE)
324 denali_reset_irq(denali);
325
326 index_addr(denali, MODE_11 | BANK(denali->flash_bank) | type, dat);
327}
328
329static int denali_dev_ready(struct mtd_info *mtd)
330{
331 struct denali_nand_info *denali = mtd_to_denali(mtd);
332
333 return !!(denali_check_irq(denali) & INTR__INT_ACT);
334}
335
Masahiro Yamada43914a22014-09-09 11:01:51 +0900336/*
337 * sends a pipeline command operation to the controller. See the Denali NAND
Chuanxiao Dongb292c342010-08-11 17:46:00 +0800338 * controller's user guide for more information (section 4.2.3.6).
Jason Robertsce082592010-05-13 15:57:33 +0100339 */
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900340static int denali_send_pipeline_cmd(struct denali_nand_info *denali, int page,
Masahiro Yamada81254502014-09-16 20:04:25 +0900341 bool ecc_en, bool transfer_spare,
342 int access_type, int op)
Jason Robertsce082592010-05-13 15:57:33 +0100343{
344 int status = PASS;
Masahiro Yamada8927ad32017-03-30 15:45:49 +0900345 uint32_t addr, cmd;
Jason Robertsce082592010-05-13 15:57:33 +0100346
347 setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
348
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900349 denali_reset_irq(denali);
Jason Robertsce082592010-05-13 15:57:33 +0100350
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900351 addr = BANK(denali->flash_bank) | page;
Jason Robertsce082592010-05-13 15:57:33 +0100352
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800353 if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800354 cmd = MODE_01 | addr;
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800355 iowrite32(cmd, denali->flash_mem);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800356 } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
Jason Robertsce082592010-05-13 15:57:33 +0100357 /* read spare area */
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800358 cmd = MODE_10 | addr;
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900359 index_addr(denali, cmd, access_type);
Jason Robertsce082592010-05-13 15:57:33 +0100360
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800361 cmd = MODE_01 | addr;
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800362 iowrite32(cmd, denali->flash_mem);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800363 } else if (op == DENALI_READ) {
Jason Robertsce082592010-05-13 15:57:33 +0100364 /* setup page read request for access type */
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800365 cmd = MODE_10 | addr;
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900366 index_addr(denali, cmd, access_type);
Jason Robertsce082592010-05-13 15:57:33 +0100367
Masahiro Yamada8927ad32017-03-30 15:45:49 +0900368 cmd = MODE_01 | addr;
369 iowrite32(cmd, denali->flash_mem);
Jason Robertsce082592010-05-13 15:57:33 +0100370 }
371 return status;
372}
373
374/* helper function that simply writes a buffer to the flash */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800375static int write_data_to_flash_mem(struct denali_nand_info *denali,
Masahiro Yamada81254502014-09-16 20:04:25 +0900376 const uint8_t *buf, int len)
Jason Robertsce082592010-05-13 15:57:33 +0100377{
Masahiro Yamada93e3c8a2014-09-09 11:01:54 +0900378 uint32_t *buf32;
379 int i;
Jason Robertsce082592010-05-13 15:57:33 +0100380
Masahiro Yamada43914a22014-09-09 11:01:51 +0900381 /*
382 * verify that the len is a multiple of 4.
383 * see comment in read_data_from_flash_mem()
384 */
Jason Robertsce082592010-05-13 15:57:33 +0100385 BUG_ON((len % 4) != 0);
386
387 /* write the data to the flash memory */
388 buf32 = (uint32_t *)buf;
389 for (i = 0; i < len / 4; i++)
Chuanxiao Dong24c3fa32010-08-09 23:59:23 +0800390 iowrite32(*buf32++, denali->flash_mem + 0x10);
Masahiro Yamada81254502014-09-16 20:04:25 +0900391 return i * 4; /* intent is to return the number of bytes read */
Jason Robertsce082592010-05-13 15:57:33 +0100392}
393
394/* helper function that simply reads a buffer from the flash */
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +0800395static int read_data_from_flash_mem(struct denali_nand_info *denali,
Masahiro Yamada81254502014-09-16 20:04:25 +0900396 uint8_t *buf, int len)
Jason Robertsce082592010-05-13 15:57:33 +0100397{
Masahiro Yamada93e3c8a2014-09-09 11:01:54 +0900398 uint32_t *buf32;
399 int i;
Jason Robertsce082592010-05-13 15:57:33 +0100400
Masahiro Yamada43914a22014-09-09 11:01:51 +0900401 /*
402 * we assume that len will be a multiple of 4, if not it would be nice
403 * to know about it ASAP rather than have random failures...
404 * This assumption is based on the fact that this function is designed
405 * to be used to read flash pages, which are typically multiples of 4.
Jason Robertsce082592010-05-13 15:57:33 +0100406 */
Jason Robertsce082592010-05-13 15:57:33 +0100407 BUG_ON((len % 4) != 0);
408
409 /* transfer the data from the flash */
410 buf32 = (uint32_t *)buf;
411 for (i = 0; i < len / 4; i++)
Jason Robertsce082592010-05-13 15:57:33 +0100412 *buf32++ = ioread32(denali->flash_mem + 0x10);
Masahiro Yamada81254502014-09-16 20:04:25 +0900413 return i * 4; /* intent is to return the number of bytes read */
Jason Robertsce082592010-05-13 15:57:33 +0100414}
415
416/* writes OOB data to the device */
417static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
418{
419 struct denali_nand_info *denali = mtd_to_denali(mtd);
Masahiro Yamada5637b692014-09-09 11:01:52 +0900420 uint32_t irq_status;
Masahiro Yamada1aded582017-03-23 05:07:06 +0900421 uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL;
Jason Robertsce082592010-05-13 15:57:33 +0100422 int status = 0;
423
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900424 if (denali_send_pipeline_cmd(denali, page, false, false, SPARE_ACCESS,
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800425 DENALI_WRITE) == PASS) {
Jason Robertsce082592010-05-13 15:57:33 +0100426 write_data_to_flash_mem(denali, buf, mtd->oobsize);
427
Jason Robertsce082592010-05-13 15:57:33 +0100428 /* wait for operation to complete */
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900429 irq_status = denali_wait_for_irq(denali, irq_mask);
Jason Robertsce082592010-05-13 15:57:33 +0100430
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900431 if (!(irq_status & INTR__PROGRAM_COMP)) {
Jamie Iles84457942011-05-06 15:28:55 +0100432 dev_err(denali->dev, "OOB write failed\n");
Jason Robertsce082592010-05-13 15:57:33 +0100433 status = -EIO;
434 }
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800435 } else {
Jamie Iles84457942011-05-06 15:28:55 +0100436 dev_err(denali->dev, "unable to send pipeline command\n");
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800437 status = -EIO;
Jason Robertsce082592010-05-13 15:57:33 +0100438 }
439 return status;
440}
441
442/* reads OOB data from the device */
443static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
444{
445 struct denali_nand_info *denali = mtd_to_denali(mtd);
Masahiro Yamada1aded582017-03-23 05:07:06 +0900446 uint32_t irq_mask = INTR__LOAD_COMP;
Masahiro Yamada5637b692014-09-09 11:01:52 +0900447 uint32_t irq_status, addr, cmd;
Jason Robertsce082592010-05-13 15:57:33 +0100448
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900449 if (denali_send_pipeline_cmd(denali, page, false, true, SPARE_ACCESS,
Chuanxiao Dong345b1d32010-07-27 10:41:53 +0800450 DENALI_READ) == PASS) {
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800451 read_data_from_flash_mem(denali, buf, mtd->oobsize);
Jason Robertsce082592010-05-13 15:57:33 +0100452
Masahiro Yamada43914a22014-09-09 11:01:51 +0900453 /*
454 * wait for command to be accepted
455 * can always use status0 bit as the
456 * mask is identical for each bank.
457 */
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900458 irq_status = denali_wait_for_irq(denali, irq_mask);
Jason Robertsce082592010-05-13 15:57:33 +0100459
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900460 if (!(irq_status & INTR__LOAD_COMP))
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900461 dev_err(denali->dev, "page on OOB timeout %d\n", page);
Jason Robertsce082592010-05-13 15:57:33 +0100462
Masahiro Yamada43914a22014-09-09 11:01:51 +0900463 /*
464 * We set the device back to MAIN_ACCESS here as I observed
Jason Robertsce082592010-05-13 15:57:33 +0100465 * instability with the controller if you do a block erase
466 * and the last transaction was a SPARE_ACCESS. Block erase
467 * is reliable (according to the MTD test infrastructure)
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800468 * if you are in MAIN_ACCESS.
Jason Robertsce082592010-05-13 15:57:33 +0100469 */
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900470 addr = BANK(denali->flash_bank) | page;
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800471 cmd = MODE_10 | addr;
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900472 index_addr(denali, cmd, MAIN_ACCESS);
Jason Robertsce082592010-05-13 15:57:33 +0100473 }
474}
475
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900476static int denali_check_erased_page(struct mtd_info *mtd,
477 struct nand_chip *chip, uint8_t *buf,
478 unsigned long uncor_ecc_flags,
479 unsigned int max_bitflips)
Jason Robertsce082592010-05-13 15:57:33 +0100480{
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900481 uint8_t *ecc_code = chip->buffers->ecccode;
482 int ecc_steps = chip->ecc.steps;
483 int ecc_size = chip->ecc.size;
484 int ecc_bytes = chip->ecc.bytes;
485 int i, ret, stat;
Masahiro Yamada81254502014-09-16 20:04:25 +0900486
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900487 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
488 chip->ecc.total);
489 if (ret)
490 return ret;
491
492 for (i = 0; i < ecc_steps; i++) {
493 if (!(uncor_ecc_flags & BIT(i)))
494 continue;
495
496 stat = nand_check_erased_ecc_chunk(buf, ecc_size,
497 ecc_code, ecc_bytes,
498 NULL, 0,
499 chip->ecc.strength);
500 if (stat < 0) {
501 mtd->ecc_stats.failed++;
502 } else {
503 mtd->ecc_stats.corrected += stat;
504 max_bitflips = max_t(unsigned int, max_bitflips, stat);
505 }
506
507 buf += ecc_size;
508 ecc_code += ecc_bytes;
509 }
510
511 return max_bitflips;
Jason Robertsce082592010-05-13 15:57:33 +0100512}
Masahiro Yamadad29109b2017-03-30 15:45:51 +0900513
Masahiro Yamada24715c72017-03-30 15:45:52 +0900514static int denali_hw_ecc_fixup(struct mtd_info *mtd,
515 struct denali_nand_info *denali,
516 unsigned long *uncor_ecc_flags)
517{
518 struct nand_chip *chip = mtd_to_nand(mtd);
519 int bank = denali->flash_bank;
520 uint32_t ecc_cor;
521 unsigned int max_bitflips;
522
523 ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
524 ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
525
526 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
527 /*
528 * This flag is set when uncorrectable error occurs at least in
529 * one ECC sector. We can not know "how many sectors", or
530 * "which sector(s)". We need erase-page check for all sectors.
531 */
532 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
533 return 0;
534 }
535
536 max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
537
538 /*
539 * The register holds the maximum of per-sector corrected bitflips.
540 * This is suitable for the return value of the ->read_page() callback.
541 * Unfortunately, we can not know the total number of corrected bits in
542 * the page. Increase the stats by max_bitflips. (compromised solution)
543 */
544 mtd->ecc_stats.corrected += max_bitflips;
545
546 return max_bitflips;
547}
548
/*
 * Field decoders for the ECC_ERROR_ADDRESS and ERR_CORRECTION_INFO
 * registers, used while walking the controller's error report in
 * denali_sw_ecc_fixup().
 */
#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
555
/*
 * Walk the controller's ECC error report and apply the corrections to
 * @buf in software.  Sectors reported as uncorrectable are recorded in
 * *uncor_ecc_flags for a later erased-page check.
 *
 * Returns the maximum number of bitflips corrected in any single ECC
 * sector, or -EIO if the ECC_TRANSACTION_DONE interrupt never arrives.
 *
 * The register reads below follow the hardware's FIFO-like protocol:
 * each ECC_ERROR_ADDRESS/ERR_CORRECTION_INFO pair pops one error entry,
 * and ECC_LAST_ERR marks the final one — do not reorder them.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		/* pop the next error entry: where it is ... */
		err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
		err_sector = ECC_SECTOR(err_addr);
		err_byte = ECC_BYTE(err_addr);

		/* ... and how to fix it */
		err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
		err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
		err_device = ECC_ERR_DEVICE(err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, the error
			 * happened in the OOB area, so there is nothing to
			 * correct in @buf.  err_device identifies which NAND
			 * device holds the error bits when more than one
			 * NAND chip is connected to the bus.
			 */
			int offset;
			unsigned int flips_in_byte;

			/* interleaved layout: sectors striped across devices */
			offset = (err_sector * ecc_size + err_byte) *
					denali->devnum + err_device;

			/* correct the ECC error by XOR-ing the fix pattern */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!ECC_LAST_ERR(err_cor_info));

	/*
	 * Once all ECC errors are handled, the controller triggers an
	 * ECC_TRANSACTION_DONE interrupt, so just wait for it here.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
627
628/* programs the controller to either enable/disable DMA transfers */
David Woodhouseaadff492010-05-13 16:12:43 +0100629static void denali_enable_dma(struct denali_nand_info *denali, bool en)
Jason Robertsce082592010-05-13 15:57:33 +0100630{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900631 iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
Jason Robertsce082592010-05-13 15:57:33 +0100632 ioread32(denali->flash_reg + DMA_ENABLE);
633}
634
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900635static void denali_setup_dma64(struct denali_nand_info *denali,
636 dma_addr_t dma_addr, int page, int op)
Masahiro Yamada210a2c82017-03-30 15:45:54 +0900637{
638 uint32_t mode;
639 const int page_count = 1;
Masahiro Yamada210a2c82017-03-30 15:45:54 +0900640
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900641 mode = MODE_10 | BANK(denali->flash_bank) | page;
Masahiro Yamada210a2c82017-03-30 15:45:54 +0900642
643 /* DMA is a three step process */
644
645 /*
646 * 1. setup transfer type, interrupt when complete,
647 * burst len = 64 bytes, the number of pages
648 */
649 index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);
650
651 /* 2. set memory low address */
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900652 index_addr(denali, mode, dma_addr);
Masahiro Yamada210a2c82017-03-30 15:45:54 +0900653
654 /* 3. set memory high address */
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900655 index_addr(denali, mode, (uint64_t)dma_addr >> 32);
Masahiro Yamada210a2c82017-03-30 15:45:54 +0900656}
657
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900658static void denali_setup_dma32(struct denali_nand_info *denali,
659 dma_addr_t dma_addr, int page, int op)
Jason Robertsce082592010-05-13 15:57:33 +0100660{
Masahiro Yamada5637b692014-09-09 11:01:52 +0900661 uint32_t mode;
Jason Robertsce082592010-05-13 15:57:33 +0100662 const int page_count = 1;
Jason Robertsce082592010-05-13 15:57:33 +0100663
664 mode = MODE_10 | BANK(denali->flash_bank);
665
666 /* DMA is a four step process */
667
668 /* 1. setup transfer type and # of pages */
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900669 index_addr(denali, mode | page, 0x2000 | op | page_count);
Jason Robertsce082592010-05-13 15:57:33 +0100670
671 /* 2. set memory high address bits 23:8 */
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900672 index_addr(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
Jason Robertsce082592010-05-13 15:57:33 +0100673
674 /* 3. set memory low address bits 23:8 */
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900675 index_addr(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
Jason Robertsce082592010-05-13 15:57:33 +0100676
Masahiro Yamada43914a22014-09-09 11:01:51 +0900677 /* 4. interrupt when complete, burst len = 64 bytes */
Jason Robertsce082592010-05-13 15:57:33 +0100678 index_addr(denali, mode | 0x14000, 0x2400);
679}
680
/*
 * Program a DMA transfer for @page at bus address @dma_addr, dispatching
 * to the 64-bit descriptor flavor when the IP advertises it, and to the
 * 32-bit flavor otherwise.
 */
static void denali_setup_dma(struct denali_nand_info *denali,
			     dma_addr_t dma_addr, int page, int op)
{
	if (denali->caps & DENALI_CAP_DMA_64BIT)
		denali_setup_dma64(denali, dma_addr, page, op);
	else
		denali_setup_dma32(denali, dma_addr, page, op);
}
689
/*
 * Write one page to the current bank through the DMA engine.  The caller
 * chooses between an ECC-protected transfer and a raw one that also
 * programs the spare area verbatim.
 *
 * raw_xfer == false: controller ECC enabled, only main data is sent
 * raw_xfer == true:  ECC disabled, chip->oob_poi is appended to the xfer
 *
 * Returns 0 on success, -EIO if the DMA completion interrupt times out.
 *
 * NOTE(review): INTR__PROGRAM_FAIL is part of the wait mask but a set
 * PROGRAM_FAIL bit is not translated into an error here — verify whether
 * that is intentional (write status is reported via waitfunc) or a gap.
 */
static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
			const uint8_t *buf, int page, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->dma_addr;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_status;
	uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
	int ret = 0;

	/*
	 * if it is a raw xfer, we want to disable ecc and send the spare area.
	 * !raw_xfer - enable ecc
	 * raw_xfer - transfer spare
	 */
	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* stage the payload in the driver's DMA bounce buffer */
	memcpy(denali->buf, buf, mtd->writesize);

	if (raw_xfer) {
		/* append the spare area so it is programmed as-is */
		memcpy(denali->buf + mtd->writesize,
			chip->oob_poi,
			mtd->oobsize);
	}

	/* hand the bounce buffer over to the device */
	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);

	denali_reset_irq(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, addr, page, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP)) {
		dev_err(denali->dev, "timeout on write_page (type = %d)\n",
			raw_xfer);
		ret = -EIO;
	}

	denali_enable_dma(denali, false);
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);

	return ret;
}
741
742/* NAND core entry points */
743
Masahiro Yamada43914a22014-09-09 11:01:51 +0900744/*
745 * this is the callback that the NAND core calls to write a page. Since
Chuanxiao Dongb292c342010-08-11 17:46:00 +0800746 * writing a page with ECC or without is similar, all the work is done
747 * by write_page above.
Masahiro Yamada43914a22014-09-09 11:01:51 +0900748 */
Josh Wufdbad98d2012-06-25 18:07:45 +0800749static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
Boris BREZILLON45aaeff2015-10-13 11:22:18 +0200750 const uint8_t *buf, int oob_required, int page)
Jason Robertsce082592010-05-13 15:57:33 +0100751{
Masahiro Yamada43914a22014-09-09 11:01:51 +0900752 /*
753 * for regular page writes, we let HW handle all the ECC
754 * data written to the device.
755 */
Masahiro Yamadab21ff822017-06-13 22:45:35 +0900756 return write_page(mtd, chip, buf, page, false);
Jason Robertsce082592010-05-13 15:57:33 +0100757}
758
Masahiro Yamada43914a22014-09-09 11:01:51 +0900759/*
760 * This is the callback that the NAND core calls to write a page without ECC.
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300761 * raw access is similar to ECC page writes, so all the work is done in the
Chuanxiao Dongb292c342010-08-11 17:46:00 +0800762 * write_page() function above.
Jason Robertsce082592010-05-13 15:57:33 +0100763 */
Josh Wufdbad98d2012-06-25 18:07:45 +0800764static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
Boris BREZILLON45aaeff2015-10-13 11:22:18 +0200765 const uint8_t *buf, int oob_required,
766 int page)
Jason Robertsce082592010-05-13 15:57:33 +0100767{
Masahiro Yamada43914a22014-09-09 11:01:51 +0900768 /*
769 * for raw page writes, we want to disable ECC and simply write
770 * whatever data is in the buffer.
771 */
Masahiro Yamadab21ff822017-06-13 22:45:35 +0900772 return write_page(mtd, chip, buf, page, true);
Jason Robertsce082592010-05-13 15:57:33 +0100773}
774
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800775static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +0100776 int page)
777{
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800778 return write_oob_data(mtd, chip->oob_poi, page);
Jason Robertsce082592010-05-13 15:57:33 +0100779}
780
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800781static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
Shmulik Ladkani5c2ffb12012-05-09 13:06:35 +0300782 int page)
Jason Robertsce082592010-05-13 15:57:33 +0100783{
784 read_oob_data(mtd, chip->oob_poi, page);
785
Shmulik Ladkani5c2ffb12012-05-09 13:06:35 +0300786 return 0;
Jason Robertsce082592010-05-13 15:57:33 +0100787}
788
/*
 * Read one ECC-protected page through the DMA engine and run error
 * correction on the result.
 *
 * Returns the number of corrected bitflips (possibly adjusted by the
 * erased-page check), or a negative error code from the fixup helpers.
 * On an uncorrectable sector the OOB is re-read so that
 * denali_check_erased_page() can distinguish a truly bad page from an
 * erased one.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->dma_addr;
	size_t size = mtd->writesize + mtd->oobsize;
	uint32_t irq_status;
	/* HW ECC fixup signals completion/errors via different irq bits */
	uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ?
				INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR :
				INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR;
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	denali_reset_irq(denali);
	denali_setup_dma(denali, addr, page, DENALI_READ);

	/* wait for operation to complete */
	irq_status = denali_wait_for_irq(denali, irq_mask);

	/* reclaim the bounce buffer before the CPU touches it */
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	memcpy(buf, denali->buf, mtd->writesize);

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (irq_status & INTR__ECC_ERR)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
	denali_enable_dma(denali, false);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* refresh OOB so the erased-page check sees real spare data */
		read_oob_data(mtd, chip->oob_poi, page);

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
835
836static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
Brian Norris1fbb9382012-05-02 10:14:55 -0700837 uint8_t *buf, int oob_required, int page)
Jason Robertsce082592010-05-13 15:57:33 +0100838{
839 struct denali_nand_info *denali = mtd_to_denali(mtd);
Masahiro Yamada00fc6152017-06-13 22:45:43 +0900840 dma_addr_t addr = denali->dma_addr;
Boris BREZILLON442f201b2015-12-11 15:06:00 +0100841 size_t size = mtd->writesize + mtd->oobsize;
Masahiro Yamada1aded582017-03-23 05:07:06 +0900842 uint32_t irq_mask = INTR__DMA_CMD_COMP;
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900843 uint32_t irq_status;
Chuanxiao5bac3acf2010-08-05 23:06:04 +0800844
Jason Robertsce082592010-05-13 15:57:33 +0100845 setup_ecc_for_xfer(denali, false, true);
David Woodhouseaadff492010-05-13 16:12:43 +0100846 denali_enable_dma(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +0100847
Jamie Iles84457942011-05-06 15:28:55 +0100848 dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
Jason Robertsce082592010-05-13 15:57:33 +0100849
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900850 denali_reset_irq(denali);
Masahiro Yamada2291cb82017-06-13 22:45:42 +0900851 denali_setup_dma(denali, addr, page, DENALI_READ);
Jason Robertsce082592010-05-13 15:57:33 +0100852
853 /* wait for operation to complete */
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900854 irq_status = denali_wait_for_irq(denali, irq_mask);
855 if (irq_status & INTR__DMA_CMD_COMP)
856 return -ETIMEDOUT;
Jason Robertsce082592010-05-13 15:57:33 +0100857
Jamie Iles84457942011-05-06 15:28:55 +0100858 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
Jason Robertsce082592010-05-13 15:57:33 +0100859
David Woodhouseaadff492010-05-13 16:12:43 +0100860 denali_enable_dma(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +0100861
Masahiro Yamada00fc6152017-06-13 22:45:43 +0900862 memcpy(buf, denali->buf, mtd->writesize);
863 memcpy(chip->oob_poi, denali->buf + mtd->writesize, mtd->oobsize);
Jason Robertsce082592010-05-13 15:57:33 +0100864
865 return 0;
866}
867
Jason Robertsce082592010-05-13 15:57:33 +0100868static void denali_select_chip(struct mtd_info *mtd, int chip)
869{
870 struct denali_nand_info *denali = mtd_to_denali(mtd);
Chuanxiao Dong7cfffac2010-08-10 00:16:51 +0800871
Jason Robertsce082592010-05-13 15:57:33 +0100872 denali->flash_bank = chip;
Jason Robertsce082592010-05-13 15:57:33 +0100873}
874
875static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
876{
Masahiro Yamadafa6134e2017-06-13 22:45:39 +0900877 struct denali_nand_info *denali = mtd_to_denali(mtd);
878 uint32_t irq_status;
879
880 /* R/B# pin transitioned from low to high? */
881 irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
882
883 return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
Jason Robertsce082592010-05-13 15:57:33 +0100884}
885
Brian Norris49c50b92014-05-06 16:02:19 -0700886static int denali_erase(struct mtd_info *mtd, int page)
Jason Robertsce082592010-05-13 15:57:33 +0100887{
888 struct denali_nand_info *denali = mtd_to_denali(mtd);
Masahiro Yamada5637b692014-09-09 11:01:52 +0900889 uint32_t cmd, irq_status;
Jason Robertsce082592010-05-13 15:57:33 +0100890
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900891 denali_reset_irq(denali);
Jason Robertsce082592010-05-13 15:57:33 +0100892
893 /* setup page read request for access type */
894 cmd = MODE_10 | BANK(denali->flash_bank) | page;
Masahiro Yamada3157d1e2014-09-09 11:01:53 +0900895 index_addr(denali, cmd, 0x1);
Jason Robertsce082592010-05-13 15:57:33 +0100896
897 /* wait for erase to complete or failure to occur */
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900898 irq_status = denali_wait_for_irq(denali,
899 INTR__ERASE_COMP | INTR__ERASE_FAIL);
Jason Robertsce082592010-05-13 15:57:33 +0100900
Masahiro Yamadac19e31d2017-06-13 22:45:38 +0900901 return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
Jason Robertsce082592010-05-13 15:57:33 +0100902}
903
/* round-down division for 64-bit dividends (do_div divides in place) */
#define DIV_ROUND_DOWN_ULL(ll, d) \
	({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })

/*
 * Translate the SDR timings requested by the NAND core into the
 * controller's clock-count registers.  All math is in picoseconds
 * relative to the bus clock (clk_x) period; each count is clamped to
 * its register field width.
 *
 * Returns 0 on success, -EINVAL for an unusable clock rate, or the
 * error from nand_get_sdr_timings().
 */
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	const struct nand_sdr_timings *timings;
	unsigned long t_clk;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	uint32_t tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_clk)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* tREA -> ACC_CLKS */
	acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->flash_reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= acc_clks;
	iowrite32(tmp, denali->flash_reg + ACC_CLKS);

	/* tRHW -> RE_2_WE (the code reads tRHW_min; old comment said tRWH) */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->flash_reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= re_2_we;
	iowrite32(tmp, denali->flash_reg + RE_2_WE);

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->flash_reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= re_2_re;
	iowrite32(tmp, denali->flash_reg + RE_2_RE);

	/* tWHR -> WE_2_RE */
	we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->flash_reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= we_2_re;
	iowrite32(tmp, denali->flash_reg + TWHR2_AND_WE_2_RE);

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->flash_reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~addr_2_data_mask;
	tmp |= addr_2_data;
	iowrite32(tmp, denali->flash_reg + TCWAW_AND_ADDR_2_DATA);

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_clk);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= rdwr_en_hi;
	iowrite32(tmp, denali->flash_reg + RDWR_EN_HI_CNT);

	/*
	 * tRP, tWP -> RDWR_EN_LO_CNT; the low phase is stretched so that
	 * low + high covers a full cycle (tRC/tWC, at least one clk_x mult)
	 */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
				  t_clk);
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_clk);
	rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= rdwr_en_lo;
	iowrite32(tmp, denali->flash_reg + RDWR_EN_LO_CNT);

	/* tCS, tCEA -> CS_SETUP_CNT (whichever constraint is tighter) */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->flash_reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= cs_setup;
	iowrite32(tmp, denali->flash_reg + CS_SETUP_CNT);

	return 0;
}
Jason Robertsce082592010-05-13 15:57:33 +01001018
/*
 * Reset each bank in turn and count how many chips respond.  The first
 * bank whose reset does not raise INTR__INT_ACT marks the end of the
 * populated banks, and denali->max_banks is trimmed to that count.
 */
static void denali_reset_banks(struct denali_nand_info *denali)
{
	u32 irq_status;
	int i;

	for (i = 0; i < denali->max_banks; i++) {
		/* irq handling is per-bank, so select the bank under test */
		denali->flash_bank = i;

		denali_reset_irq(denali);

		iowrite32(DEVICE_RESET__BANK(i),
			  denali->flash_reg + DEVICE_RESET);

		irq_status = denali_wait_for_irq(denali,
			INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
		if (!(irq_status & INTR__INT_ACT))
			break;
	}

	dev_dbg(denali->dev, "%d chips connected\n", i);
	denali->max_banks = i;
}
1041
/*
 * One-time controller register setup performed before the NAND scan.
 */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * The REVISION register may not be reliable. Platforms are allowed to
	 * override it.
	 */
	if (!denali->revision)
		denali->revision =
				swab16(ioread32(denali->flash_reg + REVISION));

	/*
	 * tell driver how many bit controller will skip before
	 * writing ECC code in OOB, this register may be already
	 * set by firmware. So we read this value out.
	 * if this value is 0, just let it be.
	 */
	denali->bbtskipbytes = ioread32(denali->flash_reg +
						SPARE_AREA_SKIP_BYTES);
	detect_max_banks(denali);
	/* 0x0F: presumably enables R/B# on all four banks — TODO confirm */
	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	iowrite32(CHIP_EN_DONT_CARE__FLAG,
			denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(1, denali->flash_reg + ECC_ENABLE);
}
1071
/*
 * Number of ECC bytes the controller consumes per ECC step (BCH code).
 * The Denali IP requires ecc.bytes to be a multiple of 2.
 */
int denali_calc_ecc_bytes(int step_size, int strength)
{
	int ecc_bits = strength * fls(step_size * 8);

	return DIV_ROUND_UP(ecc_bits, 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
1078
1079static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
1080 struct denali_nand_info *denali)
1081{
1082 int oobavail = mtd->oobsize - denali->bbtskipbytes;
1083 int ret;
1084
1085 /*
1086 * If .size and .strength are already set (usually by DT),
1087 * check if they are supported by this controller.
1088 */
1089 if (chip->ecc.size && chip->ecc.strength)
1090 return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
1091
1092 /*
1093 * We want .size and .strength closest to the chip's requirement
1094 * unless NAND_ECC_MAXIMIZE is requested.
1095 */
1096 if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
1097 ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
1098 if (!ret)
1099 return 0;
1100 }
1101
1102 /* Max ECC strength is the last thing we can do */
1103 return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
1104}
Boris Brezillon14fad622016-02-03 20:00:11 +01001105
1106static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
1107 struct mtd_oob_region *oobregion)
1108{
1109 struct denali_nand_info *denali = mtd_to_denali(mtd);
1110 struct nand_chip *chip = mtd_to_nand(mtd);
1111
1112 if (section)
1113 return -ERANGE;
1114
1115 oobregion->offset = denali->bbtskipbytes;
1116 oobregion->length = chip->ecc.total;
1117
1118 return 0;
1119}
1120
1121static int denali_ooblayout_free(struct mtd_info *mtd, int section,
1122 struct mtd_oob_region *oobregion)
1123{
1124 struct denali_nand_info *denali = mtd_to_denali(mtd);
1125 struct nand_chip *chip = mtd_to_nand(mtd);
1126
1127 if (section)
1128 return -ERANGE;
1129
1130 oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
1131 oobregion->length = mtd->oobsize - oobregion->offset;
1132
1133 return 0;
1134}
1135
/* OOB layout: [bbtskipbytes][ECC][free] — see the two callbacks above */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
1140
/* on-flash bad block table signatures: "Bbt0" and its mirror "1tbB" */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/*
 * Primary BBT descriptor: stored in the last blocks of each chip,
 * 2-bit entries, versioned so the newer of main/mirror wins.
 */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

/* Mirror BBT descriptor: identical layout, distinguished by pattern. */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
1163
Uwe Kleine-König421f91d2010-06-11 12:17:00 +02001164/* initialize driver data structures */
Brian Norris8c519432013-08-10 22:57:30 -07001165static void denali_drv_init(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +01001166{
Masahiro Yamada43914a22014-09-09 11:01:51 +09001167 /*
1168 * the completion object will be used to notify
1169 * the callee that the interrupt is done
1170 */
Jason Robertsce082592010-05-13 15:57:33 +01001171 init_completion(&denali->complete);
1172
Masahiro Yamada43914a22014-09-09 11:01:51 +09001173 /*
1174 * the spinlock will be used to synchronize the ISR with any
1175 * element that might be access shared data (interrupt status)
1176 */
Jason Robertsce082592010-05-13 15:57:33 +01001177 spin_lock_init(&denali->irq_lock);
Jason Robertsce082592010-05-13 15:57:33 +01001178}
1179
Masahiro Yamadae93c1642017-03-23 05:07:21 +09001180static int denali_multidev_fixup(struct denali_nand_info *denali)
Masahiro Yamada6da27b42017-03-23 05:07:20 +09001181{
1182 struct nand_chip *chip = &denali->nand;
1183 struct mtd_info *mtd = nand_to_mtd(chip);
1184
1185 /*
1186 * Support for multi device:
1187 * When the IP configuration is x16 capable and two x8 chips are
1188 * connected in parallel, DEVICES_CONNECTED should be set to 2.
1189 * In this case, the core framework knows nothing about this fact,
1190 * so we should tell it the _logical_ pagesize and anything necessary.
1191 */
1192 denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
1193
Masahiro Yamadacc5d8032017-03-23 05:07:22 +09001194 /*
1195 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
1196 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
1197 */
1198 if (denali->devnum == 0) {
1199 denali->devnum = 1;
1200 iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
1201 }
1202
Masahiro Yamadae93c1642017-03-23 05:07:21 +09001203 if (denali->devnum == 1)
1204 return 0;
1205
1206 if (denali->devnum != 2) {
1207 dev_err(denali->dev, "unsupported number of devices %d\n",
1208 denali->devnum);
1209 return -EINVAL;
1210 }
1211
1212 /* 2 chips in parallel */
1213 mtd->size <<= 1;
1214 mtd->erasesize <<= 1;
1215 mtd->writesize <<= 1;
1216 mtd->oobsize <<= 1;
1217 chip->chipsize <<= 1;
1218 chip->page_shift += 1;
1219 chip->phys_erase_shift += 1;
1220 chip->bbt_erase_shift += 1;
1221 chip->chip_shift += 1;
1222 chip->pagemask <<= 1;
1223 chip->ecc.size <<= 1;
1224 chip->ecc.bytes <<= 1;
1225 chip->ecc.strength <<= 1;
1226 denali->bbtskipbytes <<= 1;
1227
1228 return 0;
Masahiro Yamada6da27b42017-03-23 05:07:20 +09001229}
1230
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001231int denali_init(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +01001232{
Masahiro Yamada1394a722017-03-23 05:07:17 +09001233 struct nand_chip *chip = &denali->nand;
1234 struct mtd_info *mtd = nand_to_mtd(chip);
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001235 int ret;
Jason Robertsce082592010-05-13 15:57:33 +01001236
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001237 mtd->dev.parent = denali->dev;
Jason Robertsce082592010-05-13 15:57:33 +01001238 denali_hw_init(denali);
1239 denali_drv_init(denali);
1240
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001241 denali_clear_irq_all(denali);
1242
Masahiro Yamada7ebb8d02016-11-09 13:35:27 +09001243 /* Request IRQ after all the hardware initialization is finished */
1244 ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
1245 IRQF_SHARED, DENALI_NAND_NAME, denali);
1246 if (ret) {
Masahiro Yamada789ccf12016-11-09 13:35:24 +09001247 dev_err(denali->dev, "Unable to request IRQ\n");
Masahiro Yamada7ebb8d02016-11-09 13:35:27 +09001248 return ret;
Jason Robertsce082592010-05-13 15:57:33 +01001249 }
1250
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001251 denali_enable_irq(denali);
Masahiro Yamadad49f5792017-06-13 22:45:41 +09001252 denali_reset_banks(denali);
1253
1254 denali->flash_bank = CHIP_SELECT_INVALID;
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001255
Masahiro Yamada63757d42017-03-23 05:07:18 +09001256 nand_set_flash_node(chip, denali->dev->of_node);
Masahiro Yamada8aabdf32017-03-30 15:45:48 +09001257 /* Fallback to the default name if DT did not give "label" property */
1258 if (!mtd->name)
1259 mtd->name = "denali-nand";
Jason Robertsce082592010-05-13 15:57:33 +01001260
1261 /* register the driver with the NAND core subsystem */
Masahiro Yamada1394a722017-03-23 05:07:17 +09001262 chip->select_chip = denali_select_chip;
Masahiro Yamada1394a722017-03-23 05:07:17 +09001263 chip->read_byte = denali_read_byte;
Masahiro Yamadafa6134e2017-06-13 22:45:39 +09001264 chip->write_byte = denali_write_byte;
1265 chip->read_word = denali_read_word;
1266 chip->cmd_ctrl = denali_cmd_ctrl;
1267 chip->dev_ready = denali_dev_ready;
Masahiro Yamada1394a722017-03-23 05:07:17 +09001268 chip->waitfunc = denali_waitfunc;
Jason Robertsce082592010-05-13 15:57:33 +01001269
Masahiro Yamada1bb88662017-06-13 22:45:37 +09001270 /* clk rate info is needed for setup_data_interface */
1271 if (denali->clk_x_rate)
1272 chip->setup_data_interface = denali_setup_data_interface;
1273
Masahiro Yamada43914a22014-09-09 11:01:51 +09001274 /*
1275 * scan for NAND devices attached to the controller
Jason Robertsce082592010-05-13 15:57:33 +01001276 * this is the first stage in a two step process to register
Masahiro Yamada43914a22014-09-09 11:01:51 +09001277 * with the nand subsystem
1278 */
Masahiro Yamadaa227d4e2016-11-09 13:35:28 +09001279 ret = nand_scan_ident(mtd, denali->max_banks, NULL);
1280 if (ret)
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001281 goto disable_irq;
Chuanxiao5bac3acf2010-08-05 23:06:04 +08001282
Masahiro Yamada00fc6152017-06-13 22:45:43 +09001283 denali->buf = devm_kzalloc(denali->dev, mtd->writesize + mtd->oobsize,
1284 GFP_KERNEL);
1285 if (!denali->buf) {
Huang Shijiee07caa32013-12-21 00:02:28 +08001286 ret = -ENOMEM;
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001287 goto disable_irq;
Huang Shijiee07caa32013-12-21 00:02:28 +08001288 }
1289
Masahiro Yamada210a2c82017-03-30 15:45:54 +09001290 ret = dma_set_mask(denali->dev,
1291 DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ?
1292 64 : 32));
Huang Shijiee07caa32013-12-21 00:02:28 +08001293 if (ret) {
Masahiro Yamada789ccf12016-11-09 13:35:24 +09001294 dev_err(denali->dev, "No usable DMA configuration\n");
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001295 goto disable_irq;
Huang Shijiee07caa32013-12-21 00:02:28 +08001296 }
1297
Masahiro Yamada00fc6152017-06-13 22:45:43 +09001298 denali->dma_addr = dma_map_single(denali->dev, denali->buf,
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001299 mtd->writesize + mtd->oobsize,
Huang Shijiee07caa32013-12-21 00:02:28 +08001300 DMA_BIDIRECTIONAL);
Masahiro Yamada00fc6152017-06-13 22:45:43 +09001301 if (dma_mapping_error(denali->dev, denali->dma_addr)) {
Masahiro Yamada789ccf12016-11-09 13:35:24 +09001302 dev_err(denali->dev, "Failed to map DMA buffer\n");
Huang Shijiee07caa32013-12-21 00:02:28 +08001303 ret = -EIO;
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001304 goto disable_irq;
Chuanxiao.Dong664065242010-08-06 18:48:21 +08001305 }
1306
Masahiro Yamada43914a22014-09-09 11:01:51 +09001307 /*
Masahiro Yamada43914a22014-09-09 11:01:51 +09001308 * second stage of the NAND scan
Chuanxiao5bac3acf2010-08-05 23:06:04 +08001309 * this stage requires information regarding ECC and
Masahiro Yamada43914a22014-09-09 11:01:51 +09001310 * bad block management.
1311 */
Jason Robertsce082592010-05-13 15:57:33 +01001312
1313 /* Bad block management */
Masahiro Yamada1394a722017-03-23 05:07:17 +09001314 chip->bbt_td = &bbt_main_descr;
1315 chip->bbt_md = &bbt_mirror_descr;
Jason Robertsce082592010-05-13 15:57:33 +01001316
1317 /* skip the scan for now until we have OOB read and write support */
Masahiro Yamada1394a722017-03-23 05:07:17 +09001318 chip->bbt_options |= NAND_BBT_USE_FLASH;
1319 chip->options |= NAND_SKIP_BBTSCAN;
1320 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
Jason Robertsce082592010-05-13 15:57:33 +01001321
Graham Moored99d7282015-01-14 09:38:50 -06001322 /* no subpage writes on denali */
Masahiro Yamada1394a722017-03-23 05:07:17 +09001323 chip->options |= NAND_NO_SUBPAGE_WRITE;
Graham Moored99d7282015-01-14 09:38:50 -06001324
Masahiro Yamada7de117f2017-06-07 20:52:12 +09001325 ret = denali_ecc_setup(mtd, chip, denali);
1326 if (ret) {
1327 dev_err(denali->dev, "Failed to setup ECC settings.\n");
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001328 goto disable_irq;
Jason Robertsce082592010-05-13 15:57:33 +01001329 }
1330
Masahiro Yamada7de117f2017-06-07 20:52:12 +09001331 dev_dbg(denali->dev,
1332 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1333 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1334
1335 iowrite32(chip->ecc.strength, denali->flash_reg + ECC_CORRECTION);
Masahiro Yamada0615e7a2017-06-07 20:52:13 +09001336 iowrite32(mtd->erasesize / mtd->writesize,
1337 denali->flash_reg + PAGES_PER_BLOCK);
1338 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
1339 denali->flash_reg + DEVICE_WIDTH);
1340 iowrite32(mtd->writesize, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
1341 iowrite32(mtd->oobsize, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
Masahiro Yamada7de117f2017-06-07 20:52:12 +09001342
1343 iowrite32(chip->ecc.size, denali->flash_reg + CFG_DATA_BLOCK_SIZE);
1344 iowrite32(chip->ecc.size, denali->flash_reg + CFG_LAST_DATA_BLOCK_SIZE);
1345 /* chip->ecc.steps is set by nand_scan_tail(); not available here */
1346 iowrite32(mtd->writesize / chip->ecc.size,
1347 denali->flash_reg + CFG_NUM_DATA_BLOCKS);
1348
Boris Brezillon14fad622016-02-03 20:00:11 +01001349 mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
Chuanxiao Dongdb9a32102010-08-06 18:02:03 +08001350
Masahiro Yamadafa6134e2017-06-13 22:45:39 +09001351 if (chip->options & NAND_BUSWIDTH_16) {
1352 chip->read_buf = denali_read_buf16;
1353 chip->write_buf = denali_write_buf16;
1354 } else {
1355 chip->read_buf = denali_read_buf;
1356 chip->write_buf = denali_write_buf;
1357 }
Masahiro Yamadab21ff822017-06-13 22:45:35 +09001358 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
Masahiro Yamada1394a722017-03-23 05:07:17 +09001359 chip->ecc.read_page = denali_read_page;
1360 chip->ecc.read_page_raw = denali_read_page_raw;
1361 chip->ecc.write_page = denali_write_page;
1362 chip->ecc.write_page_raw = denali_write_page_raw;
1363 chip->ecc.read_oob = denali_read_oob;
1364 chip->ecc.write_oob = denali_write_oob;
1365 chip->erase = denali_erase;
Jason Robertsce082592010-05-13 15:57:33 +01001366
Masahiro Yamadae93c1642017-03-23 05:07:21 +09001367 ret = denali_multidev_fixup(denali);
1368 if (ret)
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001369 goto disable_irq;
Masahiro Yamada6da27b42017-03-23 05:07:20 +09001370
Masahiro Yamadaa227d4e2016-11-09 13:35:28 +09001371 ret = nand_scan_tail(mtd);
1372 if (ret)
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001373 goto disable_irq;
Jason Robertsce082592010-05-13 15:57:33 +01001374
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001375 ret = mtd_device_register(mtd, NULL, 0);
Jason Robertsce082592010-05-13 15:57:33 +01001376 if (ret) {
Masahiro Yamada789ccf12016-11-09 13:35:24 +09001377 dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001378 goto disable_irq;
Jason Robertsce082592010-05-13 15:57:33 +01001379 }
1380 return 0;
1381
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001382disable_irq:
1383 denali_disable_irq(denali);
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001384
Jason Robertsce082592010-05-13 15:57:33 +01001385 return ret;
1386}
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001387EXPORT_SYMBOL(denali_init);
Jason Robertsce082592010-05-13 15:57:33 +01001388
1389/* driver exit point */
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001390void denali_remove(struct denali_nand_info *denali)
Jason Robertsce082592010-05-13 15:57:33 +01001391{
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001392 struct mtd_info *mtd = nand_to_mtd(&denali->nand);
Boris BREZILLON320092a2015-12-11 15:02:34 +01001393 /*
1394 * Pre-compute DMA buffer size to avoid any problems in case
1395 * nand_release() ever changes in a way that mtd->writesize and
1396 * mtd->oobsize are not reliable after this call.
1397 */
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001398 int bufsize = mtd->writesize + mtd->oobsize;
Boris BREZILLON320092a2015-12-11 15:02:34 +01001399
Boris BREZILLON442f201b2015-12-11 15:06:00 +01001400 nand_release(mtd);
Masahiro Yamadac19e31d2017-06-13 22:45:38 +09001401 denali_disable_irq(denali);
Masahiro Yamada00fc6152017-06-13 22:45:43 +09001402 dma_unmap_single(denali->dev, denali->dma_addr, bufsize,
Masahiro Yamada81254502014-09-16 20:04:25 +09001403 DMA_BIDIRECTIONAL);
Jason Robertsce082592010-05-13 15:57:33 +01001404}
Dinh Nguyen2a0a2882012-09-27 10:58:05 -06001405EXPORT_SYMBOL(denali_remove);