blob: c9226172c3a3a3a159db445e73282c719cc41821 [file] [log] [blame]
Archit Tanejac76b78d2016-02-03 14:29:50 +05301/*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/slab.h>
16#include <linux/bitops.h>
17#include <linux/dma-mapping.h>
18#include <linux/dmaengine.h>
19#include <linux/module.h>
20#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
Archit Tanejac76b78d2016-02-03 14:29:50 +053024#include <linux/delay.h>
25
26/* NANDc reg offsets */
27#define NAND_FLASH_CMD 0x00
28#define NAND_ADDR0 0x04
29#define NAND_ADDR1 0x08
30#define NAND_FLASH_CHIP_SELECT 0x0c
31#define NAND_EXEC_CMD 0x10
32#define NAND_FLASH_STATUS 0x14
33#define NAND_BUFFER_STATUS 0x18
34#define NAND_DEV0_CFG0 0x20
35#define NAND_DEV0_CFG1 0x24
36#define NAND_DEV0_ECC_CFG 0x28
37#define NAND_DEV1_ECC_CFG 0x2c
38#define NAND_DEV1_CFG0 0x30
39#define NAND_DEV1_CFG1 0x34
40#define NAND_READ_ID 0x40
41#define NAND_READ_STATUS 0x44
42#define NAND_DEV_CMD0 0xa0
43#define NAND_DEV_CMD1 0xa4
44#define NAND_DEV_CMD2 0xa8
45#define NAND_DEV_CMD_VLD 0xac
46#define SFLASHC_BURST_CFG 0xe0
47#define NAND_ERASED_CW_DETECT_CFG 0xe8
48#define NAND_ERASED_CW_DETECT_STATUS 0xec
49#define NAND_EBI2_ECC_BUF_CFG 0xf0
50#define FLASH_BUF_ACC 0x100
51
52#define NAND_CTRL 0xf00
53#define NAND_VERSION 0xf08
54#define NAND_READ_LOCATION_0 0xf20
55#define NAND_READ_LOCATION_1 0xf24
56
57/* dummy register offsets, used by write_reg_dma */
58#define NAND_DEV_CMD1_RESTORE 0xdead
59#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
60
61/* NAND_FLASH_CMD bits */
62#define PAGE_ACC BIT(4)
63#define LAST_PAGE BIT(5)
64
65/* NAND_FLASH_CHIP_SELECT bits */
66#define NAND_DEV_SEL 0
67#define DM_EN BIT(2)
68
69/* NAND_FLASH_STATUS bits */
70#define FS_OP_ERR BIT(4)
71#define FS_READY_BSY_N BIT(5)
72#define FS_MPU_ERR BIT(8)
73#define FS_DEVICE_STS_ERR BIT(16)
74#define FS_DEVICE_WP BIT(23)
75
76/* NAND_BUFFER_STATUS bits */
77#define BS_UNCORRECTABLE_BIT BIT(8)
78#define BS_CORRECTABLE_ERR_MSK 0x1f
79
80/* NAND_DEVn_CFG0 bits */
81#define DISABLE_STATUS_AFTER_WRITE 4
82#define CW_PER_PAGE 6
83#define UD_SIZE_BYTES 9
84#define ECC_PARITY_SIZE_BYTES_RS 19
85#define SPARE_SIZE_BYTES 23
86#define NUM_ADDR_CYCLES 27
87#define STATUS_BFR_READ 30
88#define SET_RD_MODE_AFTER_STATUS 31
89
/* NAND_DEVn_CFG1 bits */
91#define DEV0_CFG1_ECC_DISABLE 0
92#define WIDE_FLASH 1
93#define NAND_RECOVERY_CYCLES 2
94#define CS_ACTIVE_BSY 5
95#define BAD_BLOCK_BYTE_NUM 6
96#define BAD_BLOCK_IN_SPARE_AREA 16
97#define WR_RD_BSY_GAP 17
98#define ENABLE_BCH_ECC 27
99
100/* NAND_DEV0_ECC_CFG bits */
101#define ECC_CFG_ECC_DISABLE 0
102#define ECC_SW_RESET 1
103#define ECC_MODE 4
104#define ECC_PARITY_SIZE_BYTES_BCH 8
105#define ECC_NUM_DATA_BYTES 16
106#define ECC_FORCE_CLK_OPEN 30
107
108/* NAND_DEV_CMD1 bits */
109#define READ_ADDR 0
110
111/* NAND_DEV_CMD_VLD bits */
Abhishek Sahud8a9b322017-08-11 17:09:16 +0530112#define READ_START_VLD BIT(0)
113#define READ_STOP_VLD BIT(1)
114#define WRITE_START_VLD BIT(2)
115#define ERASE_START_VLD BIT(3)
116#define SEQ_READ_START_VLD BIT(4)
Archit Tanejac76b78d2016-02-03 14:29:50 +0530117
118/* NAND_EBI2_ECC_BUF_CFG bits */
119#define NUM_STEPS 0
120
121/* NAND_ERASED_CW_DETECT_CFG bits */
122#define ERASED_CW_ECC_MASK 1
123#define AUTO_DETECT_RES 0
124#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
125#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
126#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
127#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
128#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
129
130/* NAND_ERASED_CW_DETECT_STATUS bits */
131#define PAGE_ALL_ERASED BIT(7)
132#define CODEWORD_ALL_ERASED BIT(6)
133#define PAGE_ERASED BIT(5)
134#define CODEWORD_ERASED BIT(4)
135#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
136#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
137
138/* Version Mask */
139#define NAND_VERSION_MAJOR_MASK 0xf0000000
140#define NAND_VERSION_MAJOR_SHIFT 28
141#define NAND_VERSION_MINOR_MASK 0x0fff0000
142#define NAND_VERSION_MINOR_SHIFT 16
143
144/* NAND OP_CMDs */
145#define PAGE_READ 0x2
146#define PAGE_READ_WITH_ECC 0x3
147#define PAGE_READ_WITH_ECC_SPARE 0x4
148#define PROGRAM_PAGE 0x6
149#define PAGE_PROGRAM_WITH_ECC 0x7
150#define PROGRAM_PAGE_SPARE 0x9
151#define BLOCK_ERASE 0xa
152#define FETCH_ID 0xb
153#define RESET_DEVICE 0xd
154
Abhishek Sahud8a9b322017-08-11 17:09:16 +0530155/* Default Value for NAND_DEV_CMD_VLD */
156#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
157 ERASE_START_VLD | SEQ_READ_START_VLD)
158
Archit Tanejac76b78d2016-02-03 14:29:50 +0530159/*
160 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
161 * the driver calls the chunks 'step' or 'codeword' interchangeably
162 */
163#define NANDC_STEP_SIZE 512
164
165/*
166 * the largest page size we support is 8K, this will have 16 steps/codewords
167 * of 512 bytes each
168 */
169#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
170
171/* we read at most 3 registers per codeword scan */
172#define MAX_REG_RD (3 * MAX_NUM_STEPS)
173
174/* ECC modes supported by the controller */
175#define ECC_NONE BIT(0)
176#define ECC_RS_4BIT BIT(1)
177#define ECC_BCH_4BIT BIT(2)
178#define ECC_BCH_8BIT BIT(3)
179
Abhishek Sahucb80f112017-08-17 17:37:40 +0530180#define QPIC_PER_CW_CMD_SGL 32
181#define QPIC_PER_CW_DATA_SGL 8
182
183/*
Abhishek Sahu67e830a2017-08-17 17:37:42 +0530184 * Flags used in DMA descriptor preparation helper functions
185 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
186 */
187/* Don't set the EOT in current tx BAM sgl */
188#define NAND_BAM_NO_EOT BIT(0)
189/* Set the NWD flag in current BAM sgl */
190#define NAND_BAM_NWD BIT(1)
191/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
192#define NAND_BAM_NEXT_SGL BIT(2)
193
194/*
Abhishek Sahucb80f112017-08-17 17:37:40 +0530195 * This data type corresponds to the BAM transaction which will be used for all
196 * NAND transfers.
197 * @cmd_sgl - sgl for NAND BAM command pipe
198 * @data_sgl - sgl for NAND BAM consumer/producer pipe
199 * @cmd_sgl_pos - current index in command sgl.
200 * @cmd_sgl_start - start index in command sgl.
201 * @tx_sgl_pos - current index in data sgl for tx.
202 * @tx_sgl_start - start index in data sgl for tx.
203 * @rx_sgl_pos - current index in data sgl for rx.
204 * @rx_sgl_start - start index in data sgl for rx.
205 */
struct bam_transaction {
	struct scatterlist *cmd_sgl;	/* sgl array for the BAM command pipe */
	struct scatterlist *data_sgl;	/* sgl array for the BAM consumer/producer pipe */
	u32 cmd_sgl_pos;	/* current index in command sgl */
	u32 cmd_sgl_start;	/* start index of the not-yet-prepared command run */
	u32 tx_sgl_pos;		/* current index in data sgl for tx */
	u32 tx_sgl_start;	/* start index of the not-yet-prepared tx run */
	u32 rx_sgl_pos;		/* current index in data sgl for rx */
	u32 rx_sgl_start;	/* start index of the not-yet-prepared rx run */
};
216
Abhishek Sahu381dd242017-08-17 17:37:41 +0530217/*
218 * This data type corresponds to the nand dma descriptor
219 * @list - list for desc_info
220 * @dir - DMA transfer direction
221 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
222 * ADM
223 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
224 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
225 * @dma_desc - low level DMA engine descriptor
226 */
struct desc_info {
	struct list_head node;		/* entry in qcom_nand_controller::desc_list */

	enum dma_data_direction dir;	/* DMA mapping direction for unmap */
	union {
		/* single embedded sgl; used only by the ADM DMA path */
		struct scatterlist adm_sgl;
		/* sgl run and its length; used only by the BAM DMA path */
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	struct dma_async_tx_descriptor *dma_desc;	/* low level DMA engine descriptor */
};
240
241/*
242 * holds the current register values that we want to write. acts as a contiguous
243 * chunk of memory which we use to write the controller registers through DMA.
244 */
struct nandc_regs {
	__le32 cmd;		/* NAND_FLASH_CMD */
	__le32 addr0;		/* NAND_ADDR0 */
	__le32 addr1;		/* NAND_ADDR1 */
	__le32 chip_sel;	/* NAND_FLASH_CHIP_SELECT */
	__le32 exec;		/* NAND_EXEC_CMD */

	__le32 cfg0;		/* NAND_DEV0_CFG0 */
	__le32 cfg1;		/* NAND_DEV0_CFG1 */
	__le32 ecc_bch_cfg;	/* NAND_DEV0_ECC_CFG */

	__le32 clrflashstatus;	/* value written to NAND_FLASH_STATUS to clear it */
	__le32 clrreadstatus;	/* value written to NAND_READ_STATUS to clear it */

	__le32 cmd1;		/* NAND_DEV_CMD1 */
	__le32 vld;		/* NAND_DEV_CMD_VLD */

	/* saved CMD1/VLD, written back via the *_RESTORE dummy offsets */
	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;	/* NAND_EBI2_ECC_BUF_CFG */
};
267
268/*
269 * NAND controller data struct
270 *
271 * @controller: base controller structure
272 * @host_list: list containing all the chips attached to the
273 * controller
274 * @dev: parent device
275 * @base: MMIO base
276 * @base_dma: physical base address of controller registers
277 * @core_clk: controller clock
278 * @aon_clk: another controller clock
279 *
280 * @chan: dma channel
281 * @cmd_crci: ADM DMA CRCI for command flow control
282 * @data_crci: ADM DMA CRCI for data flow control
283 * @desc_list: DMA descriptor list (list of desc_infos)
284 *
285 * @data_buffer: our local DMA buffer for page read/writes,
286 * used when we can't use the buffer provided
287 * by upper layers directly
288 * @buf_size/count/start: markers for chip->read_buf/write_buf functions
289 * @reg_read_buf: local buffer for reading back registers via DMA
Abhishek Sahu6192ff72017-08-17 17:37:39 +0530290 * @reg_read_dma: contains dma address for register read buffer
Archit Tanejac76b78d2016-02-03 14:29:50 +0530291 * @reg_read_pos: marker for data read in reg_read_buf
292 *
293 * @regs: a contiguous chunk of memory for DMA register
294 * writes. contains the register values to be
295 * written to controller
296 * @cmd1/vld: some fixed controller register values
Abhishek Sahu58f1f222017-08-11 17:09:17 +0530297 * @props: properties of current NAND controller,
Archit Tanejac76b78d2016-02-03 14:29:50 +0530298 * initialized via DT match data
Abhishek Sahucb80f112017-08-17 17:37:40 +0530299 * @max_cwperpage: maximum QPIC codewords required. calculated
300 * from all connected NAND devices pagesize
Archit Tanejac76b78d2016-02-03 14:29:50 +0530301 */
struct qcom_nand_controller {
	struct nand_hw_control controller;
	struct list_head host_list;

	struct device *dev;

	void __iomem *base;
	dma_addr_t base_dma;

	struct clk *core_clk;
	struct clk *aon_clk;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;
	/* BAM transaction state; only meaningful when props->is_bam is set */
	struct bam_transaction *bam_txn;

	u8 *data_buffer;
	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	__le32 *reg_read_buf;
	dma_addr_t reg_read_dma;
	int reg_read_pos;

	struct nandc_regs *regs;

	u32 cmd1, vld;
	const struct qcom_nandc_props *props;
};
348
349/*
350 * NAND chip structure
351 *
352 * @chip: base NAND chip structure
353 * @node: list node to add itself to host_list in
354 * qcom_nand_controller
355 *
356 * @cs: chip select value for this chip
357 * @cw_size: the number of bytes in a single step/codeword
358 * of a page, consisting of all data, ecc, spare
359 * and reserved bytes
360 * @cw_data: the number of bytes within a codeword protected
361 * by ECC
362 * @use_ecc: request the controller to use ECC for the
363 * upcoming read/write
364 * @bch_enabled: flag to tell whether BCH ECC mode is used
365 * @ecc_bytes_hw: ECC bytes used by controller hardware for this
366 * chip
367 * @status: value to be returned if NAND_CMD_STATUS command
368 * is executed
369 * @last_command: keeps track of last command on this chip. used
370 * for reading correct status
371 *
372 * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
373 * ecc/non-ecc mode for the current nand flash
374 * device
375 */
struct qcom_nand_host {
	struct nand_chip chip;
	struct list_head node;

	int cs;
	int cw_size;
	int cw_data;
	bool use_ecc;
	bool bch_enabled;
	int ecc_bytes_hw;
	int spare_bytes;	/* presumably spare bytes per codeword — set outside this chunk */
	int bbm_size;		/* presumably bad block marker size in bytes — set outside this chunk */
	u8 status;
	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;
};
398
Abhishek Sahu58f1f222017-08-11 17:09:17 +0530399/*
400 * This data type corresponds to the NAND controller properties which varies
401 * among different NAND controllers.
402 * @ecc_modes - ecc mode for NAND
Abhishek Sahu8c5d5d62017-08-11 17:09:18 +0530403 * @is_bam - whether NAND controller is using BAM
Abhishek Sahu58f1f222017-08-11 17:09:17 +0530404 */
struct qcom_nandc_props {
	u32 ecc_modes;	/* bitmask of supported ECC_* modes */
	bool is_bam;	/* true when the controller uses BAM DMA (QPIC) */
};
409
Abhishek Sahucb80f112017-08-17 17:37:40 +0530410/* Frees the BAM transaction memory */
411static void free_bam_transaction(struct qcom_nand_controller *nandc)
412{
413 struct bam_transaction *bam_txn = nandc->bam_txn;
414
415 devm_kfree(nandc->dev, bam_txn);
416}
417
418/* Allocates and Initializes the BAM transaction */
419static struct bam_transaction *
420alloc_bam_transaction(struct qcom_nand_controller *nandc)
421{
422 struct bam_transaction *bam_txn;
423 size_t bam_txn_size;
424 unsigned int num_cw = nandc->max_cwperpage;
425 void *bam_txn_buf;
426
427 bam_txn_size =
428 sizeof(*bam_txn) + num_cw *
429 ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
430 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
431
432 bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
433 if (!bam_txn_buf)
434 return NULL;
435
436 bam_txn = bam_txn_buf;
437 bam_txn_buf += sizeof(*bam_txn);
438
439 bam_txn->cmd_sgl = bam_txn_buf;
440 bam_txn_buf +=
441 sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
442
443 bam_txn->data_sgl = bam_txn_buf;
444
445 return bam_txn;
446}
447
/* get the qcom_nand_host that embeds this nand_chip */
static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}
452
/* get the controller that embeds the chip's nand_hw_control */
static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}
459
/* read a controller register at @offset from the MMIO register space */
static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}
464
/* write @val to the controller register at @offset in the MMIO register space */
static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}
470
Abhishek Sahu6192ff72017-08-17 17:37:39 +0530471static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
472 bool is_cpu)
473{
474 if (!nandc->props->is_bam)
475 return;
476
477 if (is_cpu)
478 dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
479 MAX_REG_RD *
480 sizeof(*nandc->reg_read_buf),
481 DMA_FROM_DEVICE);
482 else
483 dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
484 MAX_REG_RD *
485 sizeof(*nandc->reg_read_buf),
486 DMA_FROM_DEVICE);
487}
488
Archit Tanejac76b78d2016-02-03 14:29:50 +0530489static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
490{
491 switch (offset) {
492 case NAND_FLASH_CMD:
493 return &regs->cmd;
494 case NAND_ADDR0:
495 return &regs->addr0;
496 case NAND_ADDR1:
497 return &regs->addr1;
498 case NAND_FLASH_CHIP_SELECT:
499 return &regs->chip_sel;
500 case NAND_EXEC_CMD:
501 return &regs->exec;
502 case NAND_FLASH_STATUS:
503 return &regs->clrflashstatus;
504 case NAND_DEV0_CFG0:
505 return &regs->cfg0;
506 case NAND_DEV0_CFG1:
507 return &regs->cfg1;
508 case NAND_DEV0_ECC_CFG:
509 return &regs->ecc_bch_cfg;
510 case NAND_READ_STATUS:
511 return &regs->clrreadstatus;
512 case NAND_DEV_CMD1:
513 return &regs->cmd1;
514 case NAND_DEV_CMD1_RESTORE:
515 return &regs->orig_cmd1;
516 case NAND_DEV_CMD_VLD:
517 return &regs->vld;
518 case NAND_DEV_CMD_VLD_RESTORE:
519 return &regs->orig_vld;
520 case NAND_EBI2_ECC_BUF_CFG:
521 return &regs->ecc_buf_cfg;
522 default:
523 return NULL;
524 }
525}
526
527static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
528 u32 val)
529{
530 struct nandc_regs *regs = nandc->regs;
531 __le32 *reg;
532
533 reg = offset_to_nandc_reg(regs, offset);
534
535 if (reg)
536 *reg = cpu_to_le32(val);
537}
538
539/* helper to configure address register values */
540static void set_address(struct qcom_nand_host *host, u16 column, int page)
541{
542 struct nand_chip *chip = &host->chip;
543 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
544
545 if (chip->options & NAND_BUSWIDTH_16)
546 column >>= 1;
547
548 nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
549 nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
550}
551
552/*
553 * update_rw_regs: set up read/write register values, these will be
554 * written to the NAND controller registers via DMA
555 *
556 * @num_cw: number of steps for the read/write operation
557 * @read: read or write operation
558 */
559static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
560{
561 struct nand_chip *chip = &host->chip;
562 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
563 u32 cmd, cfg0, cfg1, ecc_bch_cfg;
564
565 if (read) {
566 if (host->use_ecc)
567 cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
568 else
569 cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
570 } else {
571 cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
572 }
573
574 if (host->use_ecc) {
575 cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
576 (num_cw - 1) << CW_PER_PAGE;
577
578 cfg1 = host->cfg1;
579 ecc_bch_cfg = host->ecc_bch_cfg;
580 } else {
581 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
582 (num_cw - 1) << CW_PER_PAGE;
583
584 cfg1 = host->cfg1_raw;
585 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
586 }
587
588 nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
589 nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
590 nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
591 nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
592 nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
593 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
594 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
595 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
596}
597
/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to DMA engine.
 */
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/*
	 * Pick up the sgl run accumulated on this channel since the last
	 * descriptor (from *_sgl_start to *_sgl_pos) and advance the start
	 * marker past it. cmd/tx move data towards the device, rx away.
	 */
	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	/* terminate the run so dma_map_sg() sees a well-formed list */
	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* add to the controller's pending descriptor list */
	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
666
/*
 * Prepare a single-sgl DMA descriptor for the ADM DMA engine, moving @size
 * bytes between @vaddr and the controller address @reg_off.
 *
 * @read:		true for a device-to-memory transfer, false for
 *			memory-to-device
 * @flow_control:	if true, use CRCI based flow control (the data CRCI
 *			for reads, the command CRCI for writes)
 */
static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* ADM uses the single sgl embedded in the descriptor */
	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	/* point the channel at the register block and pick the CRCI */
	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->data_crci;
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		slave_conf.slave_id = nandc->cmd_crci;
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	/* add to the controller's pending descriptor list */
	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
736
737/*
738 * read_reg_dma: prepares a descriptor to read a given number of
739 * contiguous registers to the reg_read_buf pointer
740 *
741 * @first: offset of the first register in the contiguous block
742 * @num_regs: number of registers to read
Abhishek Sahu67e830a2017-08-17 17:37:42 +0530743 * @flags: flags to control DMA descriptor preparation
Archit Tanejac76b78d2016-02-03 14:29:50 +0530744 */
745static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
Abhishek Sahu67e830a2017-08-17 17:37:42 +0530746 int num_regs, unsigned int flags)
Archit Tanejac76b78d2016-02-03 14:29:50 +0530747{
748 bool flow_control = false;
749 void *vaddr;
750 int size;
751
752 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
753 flow_control = true;
754
755 size = num_regs * sizeof(u32);
756 vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
757 nandc->reg_read_pos += num_regs;
758
Abhishek Sahu381dd242017-08-17 17:37:41 +0530759 return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
Archit Tanejac76b78d2016-02-03 14:29:50 +0530760}
761
762/*
763 * write_reg_dma: prepares a descriptor to write a given number of
764 * contiguous registers
765 *
766 * @first: offset of the first register in the contiguous block
767 * @num_regs: number of registers to write
Abhishek Sahu67e830a2017-08-17 17:37:42 +0530768 * @flags: flags to control DMA descriptor preparation
Archit Tanejac76b78d2016-02-03 14:29:50 +0530769 */
770static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
Abhishek Sahu67e830a2017-08-17 17:37:42 +0530771 int num_regs, unsigned int flags)
Archit Tanejac76b78d2016-02-03 14:29:50 +0530772{
773 bool flow_control = false;
774 struct nandc_regs *regs = nandc->regs;
775 void *vaddr;
776 int size;
777
778 vaddr = offset_to_nandc_reg(regs, first);
779
780 if (first == NAND_FLASH_CMD)
781 flow_control = true;
782
Abhishek Sahu67e830a2017-08-17 17:37:42 +0530783 if (first == NAND_EXEC_CMD)
784 flags |= NAND_BAM_NWD;
785
Archit Tanejac76b78d2016-02-03 14:29:50 +0530786 if (first == NAND_DEV_CMD1_RESTORE)
787 first = NAND_DEV_CMD1;
788
789 if (first == NAND_DEV_CMD_VLD_RESTORE)
790 first = NAND_DEV_CMD_VLD;
791
792 size = num_regs * sizeof(u32);
793
Abhishek Sahu381dd242017-08-17 17:37:41 +0530794 return prep_adm_dma_desc(nandc, false, first, vaddr, size,
795 flow_control);
Archit Tanejac76b78d2016-02-03 14:29:50 +0530796}
797
/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	/* @flags is unused on the ADM path; data moves need no flow control */
	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
812
/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	/* @flags is unused on the ADM path; data moves need no flow control */
	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
827
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	/* ADDR0/ADDR1, then CFG0/CFG1/ECC_CFG, then the ECC buffer config */
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
}
Archit Tanejac76b78d2016-02-03 14:29:50 +0530838
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void config_nand_cw_read(struct qcom_nand_controller *nandc)
{
	/* issue the staged command and start execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back flash+buffer status and erased-codeword detection */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
		     NAND_BAM_NEXT_SGL);
}
852
/*
 * Helper to prepare DMA descriptors to configure registers needed for reading
 * a single codeword in a page.
 */
static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
{
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc);
}
862
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	/* ADDR0/ADDR1, then CFG0/CFG1/ECC_CFG, then the ECC buffer config */
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}
874
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	/* issue the staged command and start execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back the flash status for this codeword */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	/* write the clrflashstatus/clrreadstatus values staged in regs */
	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
889
890/*
891 * the following functions are used within chip->cmdfunc() to perform different
892 * NAND_CMD_* commands
893 */
894
/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* stage the original CMD1/VLD values so they can be restored below */
	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc);

	/* copy the param page out of the controller's internal buffer */
	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
951
/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* use the raw config, with the CW_PER_PAGE field cleared for erase */
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	/* values used to clear the status registers after the operation */
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back the operation status for parse_erase_write_errors() */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	/* clear both status registers for the next operation */
	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
980
/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* no address was supplied, nothing to queue */
	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* the ID bytes land in NAND_READ_ID, fetched into reg_read_buf */
	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1003
/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read the status back so the caller can check for failure */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1020
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	/*
	 * in BAM mode, close out any scatterlist run that is still being
	 * accumulated on each channel before submitting the whole list
	 */
	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	/*
	 * BAM uses three channels; kick tx/rx and block on the command
	 * channel's last cookie. ADM has a single channel to wait on.
	 */
	if (nandc->props->is_bam) {
		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);

		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}
1066
/* unmap and free every descriptor queued since the last submit */
static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		/* BAM descriptors carry an sgl array; ADM uses a single sgl */
		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}
1084
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	/* sync the reg_read buffer (direction 'false') — see nandc_read_buffer_sync() */
	nandc_read_buffer_sync(nandc, false);
}
1091
1092static void pre_command(struct qcom_nand_host *host, int command)
1093{
1094 struct nand_chip *chip = &host->chip;
1095 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1096
1097 nandc->buf_count = 0;
1098 nandc->buf_start = 0;
1099 host->use_ecc = false;
1100 host->last_command = command;
1101
1102 clear_read_regs(nandc);
1103}
1104
/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	/* page program reports status per codeword; erase reports just one */
	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		/* an MPU error clears the write-protect bit in our status */
		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		/* op error on any CW, or device status error on the last CW */
		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}
1133
1134static void post_command(struct qcom_nand_host *host, int command)
1135{
1136 struct nand_chip *chip = &host->chip;
1137 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1138
1139 switch (command) {
1140 case NAND_CMD_READID:
Abhishek Sahu6192ff72017-08-17 17:37:39 +05301141 nandc_read_buffer_sync(nandc, true);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301142 memcpy(nandc->data_buffer, nandc->reg_read_buf,
1143 nandc->buf_count);
1144 break;
1145 case NAND_CMD_PAGEPROG:
1146 case NAND_CMD_ERASE1:
1147 parse_erase_write_errors(host, command);
1148 break;
1149 default:
1150 break;
1151 }
1152}
1153
/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;	/* true when descriptors were queued and must run now */
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		/* only latch the address/config; ecc->read_page() does the I/O */
		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		/* record the target page; ecc->write_page() does the I/O */
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
1234
1235/*
1236 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
1237 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
1238 *
 * when using RS ECC, the HW reports the same errors when reading an erased CW,
1240 * but it notifies that it is an erased CW by placing special characters at
1241 * certain offsets in the buffer.
1242 *
1243 * verify if the page is erased or not, and fix up the page for RS ECC by
1244 * replacing the special characters with 0xff.
1245 */
/*
 * Check whether an RS-ECC chunk is erased and, if so, fix up the special
 * marker bytes. Returns true when the chunk is fully erased (all 0xff after
 * the marker fixup), false otherwise (the chunk is left untouched).
 */
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
	 * is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */

	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased-codeword markers exist (0x54 at one offset, 0xff at
	 * the other), override them with 0xffs
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
	 * restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}
1282
/*
 * per-codeword status words copied back from the controller after a page
 * read; checked against the FS_*, BS_* and ERASED_CW flags in
 * parse_read_errors()
 */
struct read_stats {
	__le32 flash;		/* flash status (FS_* bits) */
	__le32 buffer;		/* buffer status (BS_* bits) */
	__le32 erased_cw;	/* erased-codeword detect status */
};
1288
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 *
 * Returns the maximum number of bitflips seen in any codeword; uncorrectable
 * codewords bump mtd->ecc_stats.failed instead.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	/* the controller deposited one read_stats triple per codeword */
	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* the last codeword holds less data plus the free oob bytes */
		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* ignore erased codeword errors */
			if (host->bch_enabled) {
				/* BCH: the HW flags erased codewords directly */
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				/* RS: detect via the in-data marker bytes */
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * make sure it isn't an erased page reported
				 * as not-erased by HW because of a few bitflips
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			/* no error flags: account the corrected bitflip count */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
1380
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 *
 * data_buf and/or oob_buf may be NULL when only the other half is wanted.
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* the last codeword holds less data plus the free oob bytes */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		config_nand_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
1445
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	/* raw reads fetch the full codeword, ECC reads only the data part */
	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	/* address the start of the last codeword of the page */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
1480
1481/* implements ecc->read_page() */
1482static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1483 uint8_t *buf, int oob_required, int page)
1484{
1485 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1486 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1487 u8 *data_buf, *oob_buf = NULL;
1488 int ret;
1489
1490 data_buf = buf;
1491 oob_buf = oob_required ? chip->oob_poi : NULL;
1492
1493 ret = read_page_ecc(host, data_buf, oob_buf);
1494 if (ret) {
1495 dev_err(nandc->dev, "failure to read page\n");
1496 return ret;
1497 }
1498
1499 return parse_read_errors(host, data_buf, oob_buf);
1500}
1501
1502/* implements ecc->read_page_raw() */
1503static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1504 struct nand_chip *chip, uint8_t *buf,
1505 int oob_required, int page)
1506{
1507 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1508 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1509 u8 *data_buf, *oob_buf;
1510 struct nand_ecc_ctrl *ecc = &chip->ecc;
1511 int i, ret;
1512
1513 data_buf = buf;
1514 oob_buf = chip->oob_poi;
1515
1516 host->use_ecc = false;
1517 update_rw_regs(host, ecc->steps, true);
Abhishek Sahubde43302017-07-19 17:17:55 +05301518 config_nand_page_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301519
1520 for (i = 0; i < ecc->steps; i++) {
1521 int data_size1, data_size2, oob_size1, oob_size2;
1522 int reg_off = FLASH_BUF_ACC;
1523
1524 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1525 oob_size1 = host->bbm_size;
1526
1527 if (i == (ecc->steps - 1)) {
1528 data_size2 = ecc->size - data_size1 -
1529 ((ecc->steps - 1) << 2);
1530 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1531 host->spare_bytes;
1532 } else {
1533 data_size2 = host->cw_data - data_size1;
1534 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1535 }
1536
Abhishek Sahubde43302017-07-19 17:17:55 +05301537 config_nand_cw_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301538
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301539 read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301540 reg_off += data_size1;
1541 data_buf += data_size1;
1542
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301543 read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301544 reg_off += oob_size1;
1545 oob_buf += oob_size1;
1546
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301547 read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301548 reg_off += data_size2;
1549 data_buf += data_size2;
1550
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301551 read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301552 oob_buf += oob_size2;
1553 }
1554
1555 ret = submit_descs(nandc);
1556 if (ret)
1557 dev_err(nandc->dev, "failure to read raw page\n");
1558
1559 free_descs(nandc);
1560
1561 return 0;
1562}
1563
1564/* implements ecc->read_oob() */
1565static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1566 int page)
1567{
1568 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1569 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1570 struct nand_ecc_ctrl *ecc = &chip->ecc;
1571 int ret;
1572
1573 clear_read_regs(nandc);
1574
1575 host->use_ecc = true;
1576 set_address(host, 0, page);
1577 update_rw_regs(host, ecc->steps, true);
1578
1579 ret = read_page_ecc(host, NULL, chip->oob_poi);
1580 if (ret)
1581 dev_err(nandc->dev, "failure to read oob\n");
1582
1583 return ret;
1584}
1585
/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* the last codeword carries less data plus the free oob area */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (i == (ecc->steps - 1)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(nandc);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	return ret;
}
1649
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, const uint8_t *buf,
				     int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	clear_read_regs(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false);
	config_nand_page_write(nandc);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		/* each raw codeword splits into data1 | bbm | data2 | oob2 */
		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (i == (ecc->steps - 1)) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		/* NAND_BAM_NO_EOT keeps the codeword in one BAM transfer */
		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(nandc);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	return ret;
}
1716
/*
 * implements ecc->write_oob()
 *
 * the NAND controller cannot write only data or only oob within a codeword,
 * since ecc is calculated for the combined codeword. we first copy the
 * entire contents for the last codeword(data + oob), replace the old oob
 * with the new one in chip->oob_poi, and then write the entire codeword.
 * this read-copy-write operation results in a slight performance loss.
 */
static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret, status = 0;

	host->use_ecc = true;

	/* read the last codeword (data + oob) into nandc->data_buffer */
	ret = copy_last_cw(host, page);
	if (ret)
		return ret;

	clear_read_regs(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	/* commit the page program and report the chip's status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
1775
/*
 * implements chip->block_bad(): checks the bad-block marker byte(s) of the
 * last codeword via a raw sub-page read. Returns non-zero when the block
 * looks bad.
 */
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;
	u32 flash_status;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading ecc
	 * portion of oob. we just want the first few bytes from this codeword
	 * that contains the BBM
	 */
	host->use_ecc = false;

	/*
	 * NOTE(review): on any failure below we jump to 'err' and return
	 * bad == 0, i.e. the block is reported good — confirm this
	 * best-effort behavior is intended.
	 */
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);

	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	/* the BBM sits at the start of the last codeword within the page */
	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	/* 16-bit wide chips carry a two-byte marker */
	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}
1815
/*
 * implements chip->block_markbad(): flags a block bad by programming the
 * whole last codeword of its first page with zeroes (raw write, no ECC).
 */
static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, status = 0;

	clear_read_regs(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false);

	config_nand_page_write(nandc);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(nandc);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	/* commit the page program and report the chip's status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}
1860
1861/*
1862 * the three functions below implement chip->read_byte(), chip->read_buf()
1863 * and chip->write_buf() respectively. these aren't used for
1864 * reading/writing page data, they are used for smaller data like reading
1865 * id, status etc
1866 */
1867static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
1868{
1869 struct nand_chip *chip = mtd_to_nand(mtd);
1870 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1871 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1872 u8 *buf = nandc->data_buffer;
1873 u8 ret = 0x0;
1874
1875 if (host->last_command == NAND_CMD_STATUS) {
1876 ret = host->status;
1877
1878 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
1879
1880 return ret;
1881 }
1882
1883 if (nandc->buf_start < nandc->buf_count)
1884 ret = buf[nandc->buf_start++];
1885
1886 return ret;
1887}
1888
1889static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1890{
1891 struct nand_chip *chip = mtd_to_nand(mtd);
1892 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1893 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
1894
1895 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
1896 nandc->buf_start += real_len;
1897}
1898
1899static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
1900 int len)
1901{
1902 struct nand_chip *chip = mtd_to_nand(mtd);
1903 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1904 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
1905
1906 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
1907
1908 nandc->buf_start += real_len;
1909}
1910
1911/* we support only one external chip for now */
1912static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
1913{
1914 struct nand_chip *chip = mtd_to_nand(mtd);
1915 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1916
1917 if (chipnr <= 0)
1918 return;
1919
1920 dev_warn(nandc->dev, "invalid chip select\n");
1921}
1922
1923/*
1924 * NAND controller page layout info
1925 *
1926 * Layout with ECC enabled:
1927 *
1928 * |----------------------| |---------------------------------|
1929 * | xx.......yy| | *********xx.......yy|
1930 * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
1931 * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
1932 * | xx.......yy| | *********xx.......yy|
1933 * |----------------------| |---------------------------------|
1934 * codeword 1,2..n-1 codeword n
1935 * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
1936 *
1937 * n = Number of codewords in the page
1938 * . = ECC bytes
1939 * * = Spare/free bytes
1940 * x = Unused byte(s)
1941 * y = Reserved byte(s)
1942 *
1943 * 2K page: n = 4, spare = 16 bytes
1944 * 4K page: n = 8, spare = 32 bytes
1945 * 8K page: n = 16, spare = 64 bytes
1946 *
1947 * the qcom nand controller operates at a sub page/codeword level. each
1948 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
1949 * the number of ECC bytes vary based on the ECC strength and the bus width.
1950 *
1951 * the first n - 1 codewords contains 516 bytes of user data, the remaining
1952 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
1953 * both user data and spare(oobavail) bytes that sum up to 516 bytes.
1954 *
1955 * When we access a page with ECC enabled, the reserved bytes(s) are not
1956 * accessible at all. When reading, we fill up these unreadable positions
1957 * with 0xffs. When writing, the controller skips writing the inaccessible
1958 * bytes.
1959 *
1960 * Layout with ECC disabled:
1961 *
1962 * |------------------------------| |---------------------------------------|
1963 * | yy xx.......| | bb *********xx.......|
1964 * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
1965 * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
1966 * | yy xx.......| | bb *********xx.......|
1967 * |------------------------------| |---------------------------------------|
1968 * codeword 1,2..n-1 codeword n
1969 * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
1970 *
1971 * n = Number of codewords in the page
1972 * . = ECC bytes
1973 * * = Spare/free bytes
1974 * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
1976 * b = Real Bad Block byte(s)
1977 * size1/size2 = function of codeword size and 'n'
1978 *
1979 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
1980 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
1981 * Block Markers. In the last codeword, this position contains the real BBM
1982 *
1983 * In order to have a consistent layout between RAW and ECC modes, we assume
1984 * the following OOB layout arrangement:
1985 *
1986 * |-----------| |--------------------|
1987 * |yyxx.......| |bb*********xx.......|
1988 * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
1989 * |yyxx.......| |bb*********xx.......|
1990 * |yyxx.......| |bb*********xx.......|
1991 * |-----------| |--------------------|
1992 * first n - 1 nth OOB region
1993 * OOB regions
1994 *
1995 * n = Number of codewords in the page
1996 * . = ECC bytes
1997 * * = FREE OOB bytes
1998 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
1999 * x = Unused byte(s)
2000 * b = Real bad block byte(s) (inaccessible when ECC enabled)
2001 *
2002 * This layout is read as is when ECC is disabled. When ECC is enabled, the
2003 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2004 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
Boris Brezillon421e81c2016-03-18 17:54:27 +01002005 * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2006 * the sum of the three).
Archit Tanejac76b78d2016-02-03 14:29:50 +05302007 */
Boris Brezillon421e81c2016-03-18 17:54:27 +01002008static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2009 struct mtd_oob_region *oobregion)
Archit Tanejac76b78d2016-02-03 14:29:50 +05302010{
Boris Brezillon421e81c2016-03-18 17:54:27 +01002011 struct nand_chip *chip = mtd_to_nand(mtd);
2012 struct qcom_nand_host *host = to_qcom_nand_host(chip);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302013 struct nand_ecc_ctrl *ecc = &chip->ecc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302014
Boris Brezillon421e81c2016-03-18 17:54:27 +01002015 if (section > 1)
2016 return -ERANGE;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302017
Boris Brezillon421e81c2016-03-18 17:54:27 +01002018 if (!section) {
2019 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2020 host->bbm_size;
2021 oobregion->offset = 0;
2022 } else {
2023 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2024 oobregion->offset = mtd->oobsize - oobregion->length;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302025 }
2026
Boris Brezillon421e81c2016-03-18 17:54:27 +01002027 return 0;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302028}
2029
Boris Brezillon421e81c2016-03-18 17:54:27 +01002030static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2031 struct mtd_oob_region *oobregion)
2032{
2033 struct nand_chip *chip = mtd_to_nand(mtd);
2034 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2035 struct nand_ecc_ctrl *ecc = &chip->ecc;
2036
2037 if (section)
2038 return -ERANGE;
2039
2040 oobregion->length = ecc->steps * 4;
2041 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2042
2043 return 0;
2044}
2045
/* OOB layout callbacks implementing the layout documented above */
static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};
2050
/*
 * qcom_nand_host_setup() - derive per-chip ECC geometry and register values
 *
 * Runs after nand_scan_ident(): picks the ECC engine (RS vs BCH) and the
 * per-codeword ecc/spare/BBM byte counts from the requested strength and
 * bus width, installs the ecc->read/write page hooks and the OOB layout,
 * and precomputes the CFG0/CFG1 (ECC and raw variants) and ECC config
 * register values used by all subsequent page operations.
 *
 * Returns 0 on success, -EINVAL when the DT-provided ECC step size is not
 * 512 bytes or when the ECC bytes don't fit the chip's OOB area.
 */
static int qcom_nand_host_setup(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte;
	bool wide_bus;
	int ecc_mode = 1;

	/*
	 * the controller requires each step consists of 512 bytes of data.
	 * bail out if DT has populated a wrong step size.
	 */
	if (ecc->size != NANDC_STEP_SIZE) {
		dev_err(nandc->dev, "invalid ecc size\n");
		return -EINVAL;
	}

	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, the controller
		 * uses lesser bytes for ECC. If RS is used, the ECC bytes is
		 * always 10 bytes
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
	 * ECC and 12 bytes for 4 bit ECC
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->mode = NAND_ECC_HW;

	/* OOB layout is the one described in the big comment block above */
	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	cwperpage = mtd->writesize / ecc->size;
	/* track the largest codewords-per-page seen across all chips */
	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/*
	 * DATA_UD_BYTES varies based on whether the read/write command protects
	 * spare data with ECC too. We protect spare data by default, so we set
	 * it to main + spare data, which are 512 and 4 bytes respectively.
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
	 * for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;

	if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
		dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
		return -EINVAL;
	}

	/* BBM offset: just past the data portion of the last codeword */
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_data << UD_SIZE_BYTES
				| 0 << DISABLE_STATUS_AFTER_WRITE
				| 5 << NUM_ADDR_CYCLES
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
				| 0 << STATUS_BFR_READ
				| 1 << SET_RD_MODE_AFTER_STATUS
				| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
				| 0 <<  CS_ACTIVE_BSY
				| bad_block_byte << BAD_BLOCK_BYTE_NUM
				| 0 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| host->bch_enabled << ENABLE_BCH_ECC;

	/* raw variants: whole codeword as user data, no spare, ECC off */
	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
				| host->cw_size << UD_SIZE_BYTES
				| 5 << NUM_ADDR_CYCLES
				| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
				| 0 << CS_ACTIVE_BSY
				| 17 << BAD_BLOCK_BYTE_NUM
				| 1 << BAD_BLOCK_IN_SPARE_AREA
				| 2 << WR_RD_BSY_GAP
				| wide_bus << WIDE_FLASH
				| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
				| 0 << ECC_SW_RESET
				| host->cw_data << ECC_NUM_DATA_BYTES
				| 1 << ECC_FORCE_CLK_OPEN
				| ecc_mode << ECC_MODE
				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}
2213
/*
 * qcom_nandc_alloc() - allocate controller-wide buffers and DMA resources
 *
 * Sets the coherent DMA mask, allocates the scratch data buffer, the
 * register-value shadow and the register read-back buffer, then requests
 * DMA channels: tx/rx/cmd (plus a DMA-mapped read-back buffer and an
 * initial BAM transaction) for BAM-based controllers, or a single "rxtx"
 * channel for ADM-based ones.
 *
 * Memory is devm-managed; DMA channels/mappings acquired here are released
 * by qcom_nandc_unalloc() (which the caller must invoke on failure).
 * Returns 0 on success or a negative error code.
 */
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and preforming read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	/* shadow copy of the controller registers, staged before DMA */
	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	/* buffer that register read-backs land in */
	nandc->reg_read_buf = devm_kzalloc(nandc->dev,
				MAX_REG_RD * sizeof(*nandc->reg_read_buf),
				GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
		if (!nandc->tx_chan) {
			dev_err(nandc->dev, "failed to request tx channel\n");
			return -ENODEV;
		}

		nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
		if (!nandc->rx_chan) {
			dev_err(nandc->dev, "failed to request rx channel\n");
			return -ENODEV;
		}

		nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
		if (!nandc->cmd_chan) {
			dev_err(nandc->dev, "failed to request cmd channel\n");
			return -ENODEV;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM tranasction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	} else {
		nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
		if (!nandc->chan) {
			dev_err(nandc->dev,
				"failed to request slave channel\n");
			return -ENODEV;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_hw_control_init(&nandc->controller);

	return 0;
}
2306
2307static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2308{
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302309 if (nandc->props->is_bam) {
Abhishek Sahu6192ff72017-08-17 17:37:39 +05302310 if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2311 dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2312 MAX_REG_RD *
2313 sizeof(*nandc->reg_read_buf),
2314 DMA_FROM_DEVICE);
2315
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302316 if (nandc->tx_chan)
2317 dma_release_channel(nandc->tx_chan);
2318
2319 if (nandc->rx_chan)
2320 dma_release_channel(nandc->rx_chan);
2321
2322 if (nandc->cmd_chan)
2323 dma_release_channel(nandc->cmd_chan);
2324 } else {
2325 if (nandc->chan)
2326 dma_release_channel(nandc->chan);
2327 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302328}
2329
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	/* kill onenand */
	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
	/* program the default command-valid bits */
	nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);

	/* enable ADM DMA */
	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);

	/* save the original values of these registers */
	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
	nandc->vld = NAND_DEV_CMD_VLD_VAL;

	return 0;
}
2346
2347static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2348 struct qcom_nand_host *host,
2349 struct device_node *dn)
2350{
2351 struct nand_chip *chip = &host->chip;
2352 struct mtd_info *mtd = nand_to_mtd(chip);
2353 struct device *dev = nandc->dev;
2354 int ret;
2355
2356 ret = of_property_read_u32(dn, "reg", &host->cs);
2357 if (ret) {
2358 dev_err(dev, "can't get chip-select\n");
2359 return -ENXIO;
2360 }
2361
2362 nand_set_flash_node(chip, dn);
2363 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
2364 mtd->owner = THIS_MODULE;
2365 mtd->dev.parent = dev;
2366
2367 chip->cmdfunc = qcom_nandc_command;
2368 chip->select_chip = qcom_nandc_select_chip;
2369 chip->read_byte = qcom_nandc_read_byte;
2370 chip->read_buf = qcom_nandc_read_buf;
2371 chip->write_buf = qcom_nandc_write_buf;
Boris Brezillon4a78cc62017-05-26 17:10:15 +02002372 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
2373 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302374
2375 /*
2376 * the bad block marker is readable only when we read the last codeword
2377 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2378 * helpers don't allow us to read BB from a nand chip with ECC
2379 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2380 * and block_markbad helpers until we permanently switch to using
2381 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2382 */
2383 chip->block_bad = qcom_nandc_block_bad;
2384 chip->block_markbad = qcom_nandc_block_markbad;
2385
2386 chip->controller = &nandc->controller;
2387 chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2388 NAND_SKIP_BBTSCAN;
2389
2390 /* set up initial status value */
2391 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2392
2393 ret = nand_scan_ident(mtd, 1, NULL);
2394 if (ret)
2395 return ret;
2396
2397 ret = qcom_nand_host_setup(host);
Abhishek Sahu89f51272017-07-19 17:17:58 +05302398
2399 return ret;
2400}
2401
2402static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
2403 struct qcom_nand_host *host,
2404 struct device_node *dn)
2405{
2406 struct nand_chip *chip = &host->chip;
2407 struct mtd_info *mtd = nand_to_mtd(chip);
2408 int ret;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302409
2410 ret = nand_scan_tail(mtd);
2411 if (ret)
2412 return ret;
2413
Abhishek Sahu89f51272017-07-19 17:17:58 +05302414 ret = mtd_device_register(mtd, NULL, 0);
2415 if (ret)
2416 nand_cleanup(mtd_to_nand(mtd));
2417
2418 return ret;
2419}
2420
/*
 * qcom_probe_nand_devices() - discover and register all child NAND chips
 *
 * Two passes: first each DT child is initialized and identified
 * (qcom_nand_host_init); failed chips are skipped. Then, for BAM
 * controllers, the BAM transaction is re-sized for the largest detected
 * page before the second pass registers each surviving chip with mtd.
 *
 * Returns 0 if at least one chip made it onto host_list, -ENODEV if none
 * did, or -ENOMEM on allocation failure.
 */
static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host, *tmp;
	int ret;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init(nandc, host, child);
		if (ret) {
			/* skip this chip but keep probing the others */
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	if (nandc->props->is_bam) {
		/*
		 * the initial BAM transaction was sized for a single codeword
		 * (ONFI param page read); reallocate now that max_cwperpage
		 * reflects every detected device
		 */
		free_bam_transaction(nandc);
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
		/*
		 * NOTE(review): 'child' is the exhausted loop cursor here
		 * (NULL after for_each_available_child_of_node completes);
		 * qcom_nand_mtd_register() does not appear to use its 'dn'
		 * argument - confirm before relying on it
		 */
		ret = qcom_nand_mtd_register(nandc, host, child);
		if (ret) {
			list_del(&host->node);
			devm_kfree(dev, host);
		}
	}

	if (list_empty(&nandc->host_list))
		return -ENODEV;

	return 0;
}
2470
2471/* parse custom DT properties here */
2472static int qcom_nandc_parse_dt(struct platform_device *pdev)
2473{
2474 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2475 struct device_node *np = nandc->dev->of_node;
2476 int ret;
2477
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302478 if (!nandc->props->is_bam) {
2479 ret = of_property_read_u32(np, "qcom,cmd-crci",
2480 &nandc->cmd_crci);
2481 if (ret) {
2482 dev_err(nandc->dev, "command CRCI unspecified\n");
2483 return ret;
2484 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302485
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302486 ret = of_property_read_u32(np, "qcom,data-crci",
2487 &nandc->data_crci);
2488 if (ret) {
2489 dev_err(nandc->dev, "data CRCI unspecified\n");
2490 return ret;
2491 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302492 }
2493
2494 return 0;
2495}
2496
/*
 * qcom_nandc_probe() - platform driver probe
 *
 * Allocates and wires up the controller state, maps registers, grabs and
 * enables the "core" and "aon" clocks, performs one-time controller setup
 * and finally scans/registers the child NAND chips.
 *
 * Error handling uses a fall-through goto ladder: each label undoes the
 * steps completed before the failure point (note that an alloc failure
 * jumps straight to err_core_clk, since the clocks aren't enabled yet,
 * and qcom_nandc_unalloc() tolerates partial allocation).
 */
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	/* per-variant properties attached to the of_match entry */
	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	/* bus address of the register block, used when building DMA descs */
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
2570
/*
 * qcom_nandc_remove() - platform driver remove: unregister every chip,
 * release DMA resources, then gate the clocks (reverse of probe order).
 */
static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct qcom_nand_host *host;

	list_for_each_entry(host, &nandc->host_list, node)
		nand_release(nand_to_mtd(&host->chip));

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	return 0;
}
2586
/* IPQ806x: no BAM DMA; supports 4-bit RS and 8-bit BCH ECC modes */
static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
};
Archit Tanejac76b78d2016-02-03 14:29:50 +05302591
/*
 * '.data' points at a struct qcom_nandc_props describing the per-variant
 * differences (supported ECC modes, BAM vs non-BAM DMA)
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
2604
/* platform driver glue; matched against the of_match table above */
static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove  = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);
2614
2615MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
2616MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
2617MODULE_LICENSE("GPL v2");