@@ -222,6 +222,86 @@ static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi)
222
222
}
223
223
}
224
224
225
/*
 * Full-duplex transmit/receive for a single transfer, using the X4
 * (32-bit) FIFO access registers while at least 4 bytes remain, then
 * falling back to byte-wide FIFO accesses for the < 4 byte remainder.
 *
 * NOTE(review): the 32-bit loads/stores assume the tx/rx buffers are
 * safely accessible 4 bytes at a time at their current offsets —
 * confirm callers never hand in buffers where this could fault.
 */
static inline void mchp_coreqspi_write_read_op(struct mchp_coreqspi *qspi)
{
	u32 control, data;

	/* Full duplex: every byte clocked out clocks one byte in. */
	qspi->rx_len = qspi->tx_len;

	/* Switch the FIFO interface to 32-bit (X4) accesses. */
	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len >= 4) {
		/* Busy-wait for space in the tx FIFO. */
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;

		data = *(u32 *)qspi->txbuf;
		qspi->txbuf += 4;
		qspi->tx_len -= 4;
		writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);

		/*
		 * The rx FIFO is twice the size of the tx FIFO, so there is
		 * no requirement to block transmission if receive data is not
		 * ready, and it is fine to let the tx FIFO completely fill
		 * without reading anything from the rx FIFO. Once the tx FIFO
		 * has been filled and becomes non-full due to a transmission
		 * occurring there will always be something to receive.
		 * IOW, this is safe as TX_FIFO_SIZE + 4 < 2 * TX_FIFO_SIZE
		 */
		if (qspi->rx_len >= 4) {
			if (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXAVAILABLE) {
				data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
				*(u32 *)qspi->rxbuf = data;
				qspi->rxbuf += 4;
				qspi->rx_len -= 4;
			}
		}
	}

	/*
	 * Since transmission is not being blocked by clearing the rx FIFO,
	 * loop here until all received data "leaked" by the loop above has
	 * been dealt with.
	 */
	while (qspi->rx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
		*(u32 *)qspi->rxbuf = data;
		qspi->rxbuf += 4;
		qspi->rx_len -= 4;
	}

	/*
	 * Since rx_len and tx_len must be < 4 bytes at this point, there's no
	 * concern about overflowing the rx or tx FIFOs any longer. It's
	 * therefore safe to loop over the remainder of the transmit data before
	 * handling the remaining receive data.
	 */
	if (!qspi->tx_len)
		return;

	/* Drop back to byte-wide FIFO accesses for the tail. */
	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *qspi->txbuf++;
		writel_relaxed(data, qspi->regs + REG_TX_DATA);
	}

	while (qspi->rx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_RX_DATA);
		/* Byte-wide register: only the low 8 bits carry data. */
		*qspi->rxbuf++ = (data & 0xFF);
	}
}
303
+
304
+
225
305
static void mchp_coreqspi_enable_ints (struct mchp_coreqspi * qspi )
226
306
{
227
307
u32 mask = IEN_TXDONE |
@@ -366,23 +446,13 @@ static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const str
366
446
writel_relaxed (frames , qspi -> regs + REG_FRAMES );
367
447
}
368
448
369
- static int mchp_qspi_wait_for_ready (struct spi_mem * mem )
449
+ static int mchp_coreqspi_wait_for_ready (struct mchp_coreqspi * qspi )
370
450
{
371
- struct mchp_coreqspi * qspi = spi_controller_get_devdata
372
- (mem -> spi -> master );
373
451
u32 status ;
374
- int ret ;
375
452
376
- ret = readl_poll_timeout (qspi -> regs + REG_STATUS , status ,
453
+ return readl_poll_timeout (qspi -> regs + REG_STATUS , status ,
377
454
(status & STATUS_READY ), 0 ,
378
455
TIMEOUT_MS );
379
- if (ret ) {
380
- dev_err (& mem -> spi -> dev ,
381
- "Timeout waiting on QSPI ready.\n" );
382
- return - ETIMEDOUT ;
383
- }
384
-
385
- return ret ;
386
456
}
387
457
388
458
static int mchp_coreqspi_exec_op (struct spi_mem * mem , const struct spi_mem_op * op )
@@ -395,9 +465,11 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
395
465
int err , i ;
396
466
397
467
mutex_lock (& qspi -> op_lock );
398
- err = mchp_qspi_wait_for_ready (mem );
399
- if (err )
468
+ err = mchp_coreqspi_wait_for_ready (qspi );
469
+ if (err ) {
470
+ dev_err (& mem -> spi -> dev , "Timeout waiting on QSPI ready.\n" );
400
471
goto error ;
472
+ }
401
473
402
474
err = mchp_coreqspi_setup_clock (qspi , mem -> spi );
403
475
if (err )
@@ -498,6 +570,108 @@ static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
498
570
.exec_op = mchp_coreqspi_exec_op ,
499
571
};
500
572
573
+ static int mchp_coreqspi_unprepare_message (struct spi_controller * ctlr ,
574
+ struct spi_message * m )
575
+ {
576
+ struct mchp_coreqspi * qspi = spi_controller_get_devdata (ctlr );
577
+
578
+ udelay (750 );
579
+ mutex_unlock (& qspi -> op_lock );
580
+
581
+ return 0 ;
582
+ }
583
+
584
+ static int mchp_coreqspi_prepare_message (struct spi_controller * ctlr ,
585
+ struct spi_message * m )
586
+ {
587
+ struct mchp_coreqspi * qspi = spi_controller_get_devdata (ctlr );
588
+ struct spi_transfer * t = NULL ;
589
+ u32 control , frames ;
590
+ u32 total_bytes = 0 , cmd_bytes = 0 , idle_cycles = 0 ;
591
+ int ret ;
592
+ bool quad = false, dual = false;
593
+
594
+ mutex_lock (& qspi -> op_lock );
595
+ ret = mchp_coreqspi_wait_for_ready (qspi );
596
+ if (ret ) {
597
+ mutex_unlock (& qspi -> op_lock );
598
+ dev_err (& ctlr -> dev , "Timeout waiting on QSPI ready.\n" );
599
+ return ret ;
600
+ }
601
+
602
+ ret = mchp_coreqspi_setup_clock (qspi , m -> spi );
603
+ if (ret ) {
604
+ mutex_unlock (& qspi -> op_lock );
605
+ return ret ;
606
+ }
607
+
608
+ control = readl_relaxed (qspi -> regs + REG_CONTROL );
609
+ control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0 );
610
+ writel_relaxed (control , qspi -> regs + REG_CONTROL );
611
+
612
+ reinit_completion (& qspi -> data_completion );
613
+
614
+ list_for_each_entry (t , & m -> transfers , transfer_list ) {
615
+ total_bytes += t -> len ;
616
+ if ((!cmd_bytes ) && !(t -> tx_buf && t -> rx_buf ))
617
+ cmd_bytes = t -> len ;
618
+ if (!t -> rx_buf )
619
+ cmd_bytes = total_bytes ;
620
+ if (t -> tx_nbits == SPI_NBITS_QUAD || t -> rx_nbits == SPI_NBITS_QUAD )
621
+ quad = true;
622
+ else if (t -> tx_nbits == SPI_NBITS_DUAL || t -> rx_nbits == SPI_NBITS_DUAL )
623
+ dual = true;
624
+ }
625
+
626
+ control = readl_relaxed (qspi -> regs + REG_CONTROL );
627
+ if (quad ) {
628
+ control |= (CONTROL_MODE0 | CONTROL_MODE12_EX_RW );
629
+ } else if (dual ) {
630
+ control &= ~CONTROL_MODE0 ;
631
+ control |= CONTROL_MODE12_FULL ;
632
+ } else {
633
+ control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0 );
634
+ }
635
+ writel_relaxed (control , qspi -> regs + REG_CONTROL );
636
+
637
+ frames = total_bytes & BYTESUPPER_MASK ;
638
+ writel_relaxed (frames , qspi -> regs + REG_FRAMESUP );
639
+ frames = total_bytes & BYTESLOWER_MASK ;
640
+ frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT ;
641
+ frames |= idle_cycles << FRAMES_IDLE_SHIFT ;
642
+ control = readl_relaxed (qspi -> regs + REG_CONTROL );
643
+ if (control & CONTROL_MODE12_MASK )
644
+ frames |= (1 << FRAMES_SHIFT );
645
+
646
+ frames |= FRAMES_FLAGWORD ;
647
+ writel_relaxed (frames , qspi -> regs + REG_FRAMES );
648
+
649
+ return 0 ;
650
+ };
651
+
652
+ static int mchp_coreqspi_transfer_one (struct spi_controller * ctlr , struct spi_device * spi ,
653
+ struct spi_transfer * t )
654
+ {
655
+ struct mchp_coreqspi * qspi = spi_controller_get_devdata (ctlr );
656
+
657
+ if ((t -> tx_buf ) && (t -> rx_buf )){
658
+ qspi -> txbuf = (u8 * )t -> tx_buf ;
659
+ qspi -> rxbuf = (u8 * )t -> rx_buf ;
660
+ qspi -> tx_len = t -> len ;
661
+ mchp_coreqspi_write_read_op (qspi );
662
+ } else if (t -> tx_buf ) {
663
+ qspi -> txbuf = (u8 * )t -> tx_buf ;
664
+ qspi -> tx_len = t -> len ;
665
+ mchp_coreqspi_write_op (qspi );
666
+ } else {
667
+ qspi -> rxbuf = (u8 * )t -> rx_buf ;
668
+ qspi -> rx_len = t -> len ;
669
+ mchp_coreqspi_read_op (qspi );
670
+ }
671
+
672
+ return 0 ;
673
+ }
674
+
501
675
static int mchp_coreqspi_probe (struct platform_device * pdev )
502
676
{
503
677
struct spi_controller * ctlr ;
@@ -552,6 +726,11 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
552
726
SPI_TX_DUAL | SPI_TX_QUAD ;
553
727
ctlr -> dev .of_node = np ;
554
728
ctlr -> min_speed_hz = clk_get_rate (qspi -> clk ) / 30 ;
729
+ ctlr -> prepare_message = mchp_coreqspi_prepare_message ;
730
+ ctlr -> unprepare_message = mchp_coreqspi_unprepare_message ;
731
+ ctlr -> transfer_one = mchp_coreqspi_transfer_one ;
732
+ ctlr -> num_chipselect = 2 ;
733
+ ctlr -> use_gpio_descriptors = true;
555
734
556
735
ret = devm_spi_register_controller (& pdev -> dev , ctlr );
557
736
if (ret ) {
0 commit comments