1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
|
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
//
// AMD SPI controller driver
//
// Copyright (c) 2020, Advanced Micro Devices, Inc.
//
// Author: Sanjay R Mehta <sanju.mehta@amd.com>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
/* SPI0 controller register offsets and bit fields */
#define AMD_SPI_CTRL0_REG	0x00
#define AMD_SPI_EXEC_CMD	BIT(16)	/* start command execution (V1) */
#define AMD_SPI_FIFO_CLEAR	BIT(20)	/* reset the FIFO read/write pointers */
#define AMD_SPI_BUSY		BIT(31)	/* controller busy flag */
#define AMD_SPI_OPCODE_REG	0x45
#define AMD_SPI_CMD_TRIGGER_REG	0x47
#define AMD_SPI_TRIGGER_CMD	BIT(7)	/* start command execution (V2/HID2) */
#define AMD_SPI_OPCODE_MASK	0xFF
#define AMD_SPI_ALT_CS_REG	0x1D
#define AMD_SPI_ALT_CS_MASK	0x3	/* chip-select field */
#define AMD_SPI_FIFO_BASE	0x80	/* start of the index-mode data FIFO */
#define AMD_SPI_TX_COUNT_REG	0x48
#define AMD_SPI_RX_COUNT_REG	0x4B
#define AMD_SPI_STATUS_REG	0x4C
#define AMD_SPI_ADDR32CTRL_REG	0x50	/* 4-byte address mode control */
#define AMD_SPI_FIFO_SIZE	70
#define AMD_SPI_MEM_SIZE	200
#define AMD_SPI_MAX_DATA	64	/* index-mode payload limit per transaction */
#define AMD_SPI_HID2_DMA_SIZE	4096	/* HID2 DMA read buffer size */

/* Clock-speed selection registers */
#define AMD_SPI_ENA_REG		0x20
#define AMD_SPI_ALT_SPD_SHIFT	20
#define AMD_SPI_ALT_SPD_MASK	GENMASK(23, AMD_SPI_ALT_SPD_SHIFT)
#define AMD_SPI_SPI100_SHIFT	0
#define AMD_SPI_SPI100_MASK	GENMASK(AMD_SPI_SPI100_SHIFT, AMD_SPI_SPI100_SHIFT)
#define AMD_SPI_SPEED_REG	0x6C
#define AMD_SPI_SPD7_SHIFT	8
#define AMD_SPI_SPD7_MASK	GENMASK(13, AMD_SPI_SPD7_SHIFT)

/* HID2 controller registers (DMA read path) */
#define AMD_SPI_HID2_INPUT_RING_BUF0	0X100
#define AMD_SPI_HID2_CNTRL		0x150
#define AMD_SPI_HID2_INT_STATUS		0x154
#define AMD_SPI_HID2_CMD_START		0x156
#define AMD_SPI_HID2_INT_MASK		0x158
#define AMD_SPI_HID2_READ_CNTRL0	0x170
#define AMD_SPI_HID2_READ_CNTRL1	0x174
#define AMD_SPI_HID2_READ_CNTRL2	0x180

#define AMD_SPI_MAX_HZ		100000000
#define AMD_SPI_MIN_HZ		800000

/* Polling interval and timeout for busy-wait loops */
#define AMD_SPI_IO_SLEEP_US	20
#define AMD_SPI_IO_TIMEOUT_US	2000000

/* SPI read command opcodes */
#define AMD_SPI_OP_READ		0x03	/* Read data bytes (low frequency) */
#define AMD_SPI_OP_READ_FAST	0x0b	/* Read data bytes (high frequency) */
#define AMD_SPI_OP_READ_1_1_2	0x3b	/* Read data bytes (Dual Output SPI) */
#define AMD_SPI_OP_READ_1_2_2	0xbb	/* Read data bytes (Dual I/O SPI) */
#define AMD_SPI_OP_READ_1_1_4	0x6b	/* Read data bytes (Quad Output SPI) */
#define AMD_SPI_OP_READ_1_4_4	0xeb	/* Read data bytes (Quad I/O SPI) */

/* SPI read command opcodes - 4B address */
#define AMD_SPI_OP_READ_FAST_4B		0x0c	/* Read data bytes (high frequency) */
#define AMD_SPI_OP_READ_1_1_2_4B	0x3c	/* Read data bytes (Dual Output SPI) */
#define AMD_SPI_OP_READ_1_2_2_4B	0xbc	/* Read data bytes (Dual I/O SPI) */
#define AMD_SPI_OP_READ_1_1_4_4B	0x6c	/* Read data bytes (Quad Output SPI) */
#define AMD_SPI_OP_READ_1_4_4_4B	0xec	/* Read data bytes (Quad I/O SPI) */
/**
 * enum amd_spi_versions - SPI controller versions
 * @AMD_SPI_V1: AMDI0061 hardware version
 * @AMD_SPI_V2: AMDI0062 hardware version
 * @AMD_HID2_SPI: AMDI0063 hardware version
 */
enum amd_spi_versions {
	AMD_SPI_V1 = 1,
	AMD_SPI_V2,
	AMD_HID2_SPI,
};

/*
 * enum amd_spi_speed - hardware encodings for the supported clock rates.
 * Values F_66_66MHz..F_800KHz go into the "enable" register speed field;
 * SPI_SPD7 selects the indirect SPISPEED register, whose own field takes
 * F_50MHz/F_4MHz/F_3_17MHz (see amd_set_spi_freq()).
 */
enum amd_spi_speed {
	F_66_66MHz,
	F_33_33MHz,
	F_22_22MHz,
	F_16_66MHz,
	F_100MHz,
	F_800KHz,
	SPI_SPD7 = 0x7,
	F_50MHz = 0x4,
	F_4MHz = 0x32,
	F_3_17MHz = 0x3F
};

/**
 * struct amd_spi_freq - Matches device speed with values to write in regs
 * @speed_hz: Device frequency
 * @enable_val: Value to be written to "enable register"
 * @spd7_val: Some frequencies require a value written to the SPISPEED register
 */
struct amd_spi_freq {
	u32 speed_hz;
	u32 enable_val;
	u32 spd7_val;
};

/**
 * struct amd_spi - SPI driver instance
 * @io_remap_addr: Start address of the SPI controller registers
 * @phy_dma_buf: Physical address of DMA buffer (HID2 read path)
 * @dma_virt_addr: Virtual address of DMA buffer (HID2 read path)
 * @version: SPI controller hardware version
 * @speed_hz: Device frequency (cached last programmed rate)
 */
struct amd_spi {
	void __iomem *io_remap_addr;
	dma_addr_t phy_dma_buf;
	void *dma_virt_addr;
	enum amd_spi_versions version;
	unsigned int speed_hz;
};
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
{
return readb((u8 __iomem *)amd_spi->io_remap_addr + idx);
}
/* Byte-wide MMIO write at byte offset @idx from the controller base. */
static inline void amd_spi_writereg8(struct amd_spi *amd_spi, int idx, u8 val)
{
	u8 __iomem *reg = (u8 __iomem *)amd_spi->io_remap_addr + idx;

	writeb(val, reg);
}
/* Read-modify-write an 8-bit register: clear @clear bits, then set @set bits. */
static void amd_spi_setclear_reg8(struct amd_spi *amd_spi, int idx, u8 set, u8 clear)
{
	u8 v = amd_spi_readreg8(amd_spi, idx);

	v &= ~clear;
	v |= set;
	amd_spi_writereg8(amd_spi, idx, v);
}
static inline u16 amd_spi_readreg16(struct amd_spi *amd_spi, int idx)
{
return readw((u8 __iomem *)amd_spi->io_remap_addr + idx);
}
/* 16-bit MMIO write at byte offset @idx from the controller base. */
static inline void amd_spi_writereg16(struct amd_spi *amd_spi, int idx, u16 val)
{
	u8 __iomem *reg = (u8 __iomem *)amd_spi->io_remap_addr + idx;

	writew(val, reg);
}
static inline u32 amd_spi_readreg32(struct amd_spi *amd_spi, int idx)
{
return readl((u8 __iomem *)amd_spi->io_remap_addr + idx);
}
/* 32-bit MMIO write at byte offset @idx from the controller base. */
static inline void amd_spi_writereg32(struct amd_spi *amd_spi, int idx, u32 val)
{
	u8 __iomem *reg = (u8 __iomem *)amd_spi->io_remap_addr + idx;

	writel(val, reg);
}
static inline u64 amd_spi_readreg64(struct amd_spi *amd_spi, int idx)
{
return readq((u8 __iomem *)amd_spi->io_remap_addr + idx);
}
/* 64-bit MMIO write at byte offset @idx from the controller base. */
static inline void amd_spi_writereg64(struct amd_spi *amd_spi, int idx, u64 val)
{
	u8 __iomem *reg = (u8 __iomem *)amd_spi->io_remap_addr + idx;

	writeq(val, reg);
}
/* Read-modify-write a 32-bit register: clear @clear bits, then set @set bits. */
static inline void amd_spi_setclear_reg32(struct amd_spi *amd_spi, int idx, u32 set, u32 clear)
{
	u32 v = amd_spi_readreg32(amd_spi, idx);

	v &= ~clear;
	v |= set;
	amd_spi_writereg32(amd_spi, idx, v);
}
/* Program the chip-select field of the ALT_CS register, preserving other bits. */
static void amd_spi_select_chip(struct amd_spi *amd_spi, u8 cs)
{
	u8 v = amd_spi_readreg8(amd_spi, AMD_SPI_ALT_CS_REG);

	v = (v & ~AMD_SPI_ALT_CS_MASK) | cs;
	amd_spi_writereg8(amd_spi, AMD_SPI_ALT_CS_REG, v);
}
/* Release the chip select: write @chip_select with its CS field bits dropped. */
static inline void amd_spi_clear_chip(struct amd_spi *amd_spi, u8 chip_select)
{
	u8 v = chip_select & ~AMD_SPI_ALT_CS_MASK;

	amd_spi_writereg8(amd_spi, AMD_SPI_ALT_CS_REG, v);
}
static void amd_spi_clear_fifo_ptr(struct amd_spi *amd_spi)
{
amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, AMD_SPI_FIFO_CLEAR);
}
/*
 * Latch the command opcode.  V1 keeps it in the low byte of CTRL0; V2 and
 * HID2 controllers have a dedicated opcode register.  Returns 0 on success,
 * -ENODEV for an unknown controller version.
 */
static int amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
{
	if (amd_spi->version == AMD_SPI_V1) {
		amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode,
				       AMD_SPI_OPCODE_MASK);
		return 0;
	}

	if (amd_spi->version == AMD_SPI_V2 || amd_spi->version == AMD_HID2_SPI) {
		amd_spi_writereg8(amd_spi, AMD_SPI_OPCODE_REG, cmd_opcode);
		return 0;
	}

	return -ENODEV;
}
/* Number of bytes the controller should clock in after the TX phase. */
static inline void amd_spi_set_rx_count(struct amd_spi *amd_spi, u8 rx_count)
{
	amd_spi_writereg8(amd_spi, AMD_SPI_RX_COUNT_REG, rx_count);
}
/* Number of bytes the controller should clock out after the opcode. */
static inline void amd_spi_set_tx_count(struct amd_spi *amd_spi, u8 tx_count)
{
	amd_spi_writereg8(amd_spi, AMD_SPI_TX_COUNT_REG, tx_count);
}
static int amd_spi_busy_wait(struct amd_spi *amd_spi)
{
u32 val;
int reg;
switch (amd_spi->version) {
case AMD_SPI_V1:
reg = AMD_SPI_CTRL0_REG;
break;
case AMD_SPI_V2:
case AMD_HID2_SPI:
reg = AMD_SPI_STATUS_REG;
break;
default:
return -ENODEV;
}
return readl_poll_timeout(amd_spi->io_remap_addr + reg, val,
!(val & AMD_SPI_BUSY), 20, 2000000);
}
/*
 * Kick off the command previously programmed into the controller.
 * Waits for any in-flight command first; returns 0 on success, a negative
 * errno on busy-wait failure or unknown hardware version.
 */
static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
{
	int ret;

	/* Make sure the previous command has completed before triggering. */
	ret = amd_spi_busy_wait(amd_spi);
	if (ret)
		return ret;

	switch (amd_spi->version) {
	case AMD_SPI_V1:
		/* Set ExecuteOpCode bit in the CTRL0 register */
		amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD,
				       AMD_SPI_EXEC_CMD);
		return 0;
	case AMD_SPI_V2:
	case AMD_HID2_SPI:
		/* Trigger the command execution */
		amd_spi_setclear_reg8(amd_spi, AMD_SPI_CMD_TRIGGER_REG,
				      AMD_SPI_TRIGGER_CMD, AMD_SPI_TRIGGER_CMD);
		return 0;
	default:
		return -ENODEV;
	}
}
/* spi_controller ->setup() callback: reset the FIFO pointers for the device. */
static int amd_spi_host_setup(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct amd_spi *amd_spi = spi_controller_get_devdata(ctlr);

	amd_spi_clear_fifo_ptr(amd_spi);

	return 0;
}
/*
 * Supported clock rates, sorted fastest-first; amd_set_spi_freq() picks the
 * first entry whose rate does not exceed the requested one.
 */
static const struct amd_spi_freq amd_spi_freq[] = {
	{ AMD_SPI_MAX_HZ, F_100MHz, 0},
	{ 66660000, F_66_66MHz, 0},
	{ 50000000, SPI_SPD7, F_50MHz},
	{ 33330000, F_33_33MHz, 0},
	{ 22220000, F_22_22MHz, 0},
	{ 16660000, F_16_66MHz, 0},
	{ 4000000, SPI_SPD7, F_4MHz},
	{ 3170000, SPI_SPD7, F_3_17MHz},
	{ AMD_SPI_MIN_HZ, F_800KHz, 0},
};
/*
 * amd_set_spi_freq() - program the controller clock for @speed_hz.
 *
 * Picks the fastest table entry not exceeding @speed_hz and writes the
 * matching enable/SPD7 register fields.  Returns -EINVAL for rates below
 * the controller minimum.  The programmed rate is cached in @amd_spi to
 * skip redundant register writes.
 */
static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
{
	unsigned int i, spd7_val, alt_spd;

	if (speed_hz < AMD_SPI_MIN_HZ)
		return -EINVAL;

	/* Table is sorted fastest-first; the last entry (MIN_HZ) always matches. */
	for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
		if (speed_hz >= amd_spi_freq[i].speed_hz)
			break;

	/* Already running at this rate — nothing to program. */
	if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
		return 0;

	amd_spi->speed_hz = amd_spi_freq[i].speed_hz;
	alt_spd = (amd_spi_freq[i].enable_val << AMD_SPI_ALT_SPD_SHIFT)
		& AMD_SPI_ALT_SPD_MASK;
	amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, alt_spd,
			       AMD_SPI_ALT_SPD_MASK);
	/* 100 MHz additionally requires the SPI100 enable bit. */
	if (amd_spi->speed_hz == AMD_SPI_MAX_HZ)
		amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, 1,
				       AMD_SPI_SPI100_MASK);
	/* Some rates are selected indirectly through the SPISPEED register. */
	if (amd_spi_freq[i].spd7_val) {
		spd7_val = (amd_spi_freq[i].spd7_val << AMD_SPI_SPD7_SHIFT)
			& AMD_SPI_SPD7_MASK;
		amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
				       AMD_SPI_SPD7_MASK);
	}

	return 0;
}
/*
 * amd_spi_fifo_xfer() - run one spi_message through the index-mode FIFO.
 *
 * The first TX byte of the message is consumed as the command opcode; the
 * remaining TX bytes are copied into the FIFO, the command is executed, and
 * any RX bytes are read back out of the FIFO after the TX region.
 * Finalizes the message and returns its status (0 on success).
 */
static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
				    struct spi_controller *host,
				    struct spi_message *message)
{
	struct spi_transfer *xfer = NULL;
	struct spi_device *spi = message->spi;
	u8 cmd_opcode = 0, fifo_pos = AMD_SPI_FIFO_BASE;
	u8 *buf = NULL;
	u32 i = 0;
	u32 tx_len = 0, rx_len = 0;

	list_for_each_entry(xfer, &message->transfers,
			    transfer_list) {
		/* Per-transfer speed overrides the device default. */
		if (xfer->speed_hz)
			amd_set_spi_freq(amd_spi, xfer->speed_hz);
		else
			amd_set_spi_freq(amd_spi, spi->max_speed_hz);

		if (xfer->tx_buf) {
			buf = (u8 *)xfer->tx_buf;
			if (!tx_len) {
				/* First TX byte is the opcode, not FIFO data. */
				cmd_opcode = *(u8 *)xfer->tx_buf;
				buf++;
				xfer->len--;
			}
			tx_len += xfer->len;

			/* Write data into the FIFO. */
			for (i = 0; i < xfer->len; i++)
				amd_spi_writereg8(amd_spi, fifo_pos + i, buf[i]);

			fifo_pos += xfer->len;
		}

		/* Store no. of bytes to be received from FIFO */
		if (xfer->rx_buf)
			rx_len += xfer->len;
	}

	/* No TX transfer means no opcode — the message is malformed. */
	if (!buf) {
		message->status = -EINVAL;
		goto fin_msg;
	}

	amd_spi_set_opcode(amd_spi, cmd_opcode);
	amd_spi_set_tx_count(amd_spi, tx_len);
	amd_spi_set_rx_count(amd_spi, rx_len);

	/* Execute command */
	message->status = amd_spi_execute_opcode(amd_spi);
	if (message->status)
		goto fin_msg;

	if (rx_len) {
		/* Wait for completion before draining the RX bytes. */
		message->status = amd_spi_busy_wait(amd_spi);
		if (message->status)
			goto fin_msg;

		list_for_each_entry(xfer, &message->transfers, transfer_list)
			if (xfer->rx_buf) {
				buf = (u8 *)xfer->rx_buf;
				/* Read data from FIFO to receive buffer */
				for (i = 0; i < xfer->len; i++)
					buf[i] = amd_spi_readreg8(amd_spi, fifo_pos + i);
				fifo_pos += xfer->len;
			}
	}

	/* Update statistics (+1 accounts for the opcode byte). */
	message->actual_length = tx_len + rx_len + 1;

fin_msg:
	/* V2/HID2 controllers need the chip select released explicitly. */
	switch (amd_spi->version) {
	case AMD_SPI_V1:
		break;
	case AMD_SPI_V2:
	case AMD_HID2_SPI:
		amd_spi_clear_chip(amd_spi, spi_get_chipselect(message->spi, 0));
		break;
	default:
		return -ENODEV;
	}

	spi_finalize_current_message(host);

	return message->status;
}
/* True for the 4-byte-address read opcodes this driver recognizes. */
static inline bool amd_is_spi_read_cmd_4b(const u16 op)
{
	return op == AMD_SPI_OP_READ_FAST_4B ||
	       op == AMD_SPI_OP_READ_1_1_2_4B ||
	       op == AMD_SPI_OP_READ_1_2_2_4B ||
	       op == AMD_SPI_OP_READ_1_1_4_4B ||
	       op == AMD_SPI_OP_READ_1_4_4_4B;
}
/* True for any read opcode (3- or 4-byte address) this driver recognizes. */
static inline bool amd_is_spi_read_cmd(const u16 op)
{
	return op == AMD_SPI_OP_READ ||
	       op == AMD_SPI_OP_READ_FAST ||
	       op == AMD_SPI_OP_READ_1_1_2 ||
	       op == AMD_SPI_OP_READ_1_2_2 ||
	       op == AMD_SPI_OP_READ_1_1_4 ||
	       op == AMD_SPI_OP_READ_1_4_4 ||
	       amd_is_spi_read_cmd_4b(op);
}
/* spi-mem ->supports_op() callback: filter ops by bus width and size limits. */
static bool amd_spi_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	struct amd_spi *amd_spi = spi_controller_get_devdata(mem->spi->controller);

	/* bus width is number of IO lines used to transmit */
	if (op->cmd.buswidth > 1 || op->addr.buswidth > 4)
		return false;

	/* AMD SPI controllers support quad mode only for read operations */
	if (amd_is_spi_read_cmd(op->cmd.opcode)) {
		if (op->data.buswidth > 4)
			return false;

		/*
		 * HID2 SPI controller supports DMA read up to 4K bytes and
		 * doesn't support 4-byte address commands.
		 */
		if (amd_spi->version == AMD_HID2_SPI) {
			if (amd_is_spi_read_cmd_4b(op->cmd.opcode) ||
			    op->data.nbytes > AMD_SPI_HID2_DMA_SIZE)
				return false;
		} else if (op->data.nbytes > AMD_SPI_MAX_DATA) {
			return false;
		}
	} else if (op->data.buswidth > 1 || op->data.nbytes > AMD_SPI_MAX_DATA) {
		/* Non-read ops: single IO line, index-mode size limit. */
		return false;
	}

	return spi_mem_default_supports_op(mem, op);
}
static int amd_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct amd_spi *amd_spi = spi_controller_get_devdata(mem->spi->controller);
/*
* HID2 SPI controller DMA read mode supports reading up to 4k
* bytes in single transaction, where as SPI0 and HID2 SPI
* controller index mode supports maximum of 64 bytes in a single
* transaction.
*/
if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode))
op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_HID2_DMA_SIZE);
else
op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_MAX_DATA);
return 0;
}
/*
 * Write the command address into the FIFO, most-significant byte first,
 * filling the op->addr.nbytes slots directly after the opcode position.
 */
static void amd_spi_set_addr(struct amd_spi *amd_spi,
			     const struct spi_mem_op *op)
{
	u8 nbytes = op->addr.nbytes;
	int i;

	for (i = 0; i < nbytes; i++) {
		u8 byte = (op->addr.val >> (8 * (nbytes - i - 1))) & 0xff;

		amd_spi_writereg8(amd_spi, AMD_SPI_FIFO_BASE + i, byte);
	}
}
/* Index-mode write: opcode + address + payload into the FIFO, then execute. */
static void amd_spi_mem_data_out(struct amd_spi *amd_spi,
				 const struct spi_mem_op *op)
{
	int base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes;
	u64 *buf_64 = (u64 *)op->data.buf.out;
	u32 nbytes = op->data.nbytes;
	u32 left_data = nbytes;
	u8 *buf;
	int i;

	amd_spi_set_opcode(amd_spi, op->cmd.opcode);
	amd_spi_set_addr(amd_spi, op);

	/* Copy the bulk of the payload eight bytes at a time... */
	for (i = 0; left_data >= 8; i++, left_data -= 8)
		amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8), *buf_64++);

	/* ...then the remaining tail byte by byte. */
	buf = (u8 *)buf_64;
	for (i = 0; i < left_data; i++) {
		amd_spi_writereg8(amd_spi, base_addr + op->dummy.nbytes + nbytes + i - left_data,
				  buf[i]);
	}

	amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes);
	amd_spi_set_rx_count(amd_spi, 0);
	amd_spi_clear_fifo_ptr(amd_spi);
	amd_spi_execute_opcode(amd_spi);
}
/*
 * amd_spi_hiddma_read() - perform one HID2 DMA (basic) read into the
 * driver's coherent DMA buffer.  The caller copies the data out of
 * amd_spi->dma_virt_addr afterwards.
 */
static void amd_spi_hiddma_read(struct amd_spi *amd_spi, const struct spi_mem_op *op)
{
	u16 hid_cmd_start, val;
	u32 hid_regval;

	/* Set the opcode in hid2_read_control0 register */
	hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_READ_CNTRL0);
	hid_regval = (hid_regval & ~GENMASK(7, 0)) | op->cmd.opcode;

	/*
	 * Program the address in the hid2_read_control0 register [8:31]. The address should
	 * be written starting from the 8th bit of the register, requiring an 8-bit shift.
	 * Additionally, to convert a 2-byte spinand address to a 3-byte address, another
	 * 8-bit shift is needed. Therefore, a total shift of 16 bits is required.
	 */
	hid_regval = (hid_regval & ~GENMASK(31, 8)) | (op->addr.val << 16);
	amd_spi_writereg32(amd_spi, AMD_SPI_HID2_READ_CNTRL0, hid_regval);

	/* Configure dummy clock cycles for fast read, dual, quad I/O commands */
	hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_READ_CNTRL2);
	/* Fast read dummy cycle */
	hid_regval &= ~GENMASK(4, 0);
	/* Fast read Dual I/O dummy cycle */
	hid_regval &= ~GENMASK(12, 8);
	/* Fast read Quad I/O dummy cycle */
	hid_regval = (hid_regval & ~GENMASK(20, 16)) | BIT(17);
	/* Set no of preamble bytecount */
	hid_regval &= ~GENMASK(27, 24);
	amd_spi_writereg32(amd_spi, AMD_SPI_HID2_READ_CNTRL2, hid_regval);

	/*
	 * Program the HID2 Input Ring Buffer0. 4k aligned buf_memory_addr[31:12],
	 * buf_size[4:0], end_input_ring[5].
	 */
	hid_regval = amd_spi->phy_dma_buf | BIT(5) | BIT(0);
	amd_spi_writereg32(amd_spi, AMD_SPI_HID2_INPUT_RING_BUF0, hid_regval);

	/* Program max read length(no of DWs) in hid2_read_control1 register */
	hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_READ_CNTRL1);
	hid_regval = (hid_regval & ~GENMASK(15, 0)) | ((op->data.nbytes / 4) - 1);
	amd_spi_writereg32(amd_spi, AMD_SPI_HID2_READ_CNTRL1, hid_regval);

	/* Set cmd start bit in hid2_cmd_start register to trigger HID basic read operation */
	hid_cmd_start = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_CMD_START);
	amd_spi_writereg16(amd_spi, AMD_SPI_HID2_CMD_START, (hid_cmd_start | BIT(3)));

	/* Check interrupt status of HIDDMA basic read operation in hid2_int_status register */
	/* NOTE(review): the poll-timeout return value is ignored; on timeout the
	 * caller will copy whatever is in the DMA buffer — confirm intentional. */
	readw_poll_timeout(amd_spi->io_remap_addr + AMD_SPI_HID2_INT_STATUS, val,
			   (val & BIT(3)), AMD_SPI_IO_SLEEP_US, AMD_SPI_IO_TIMEOUT_US);

	/* Clear the interrupts by writing to hid2_int_status register */
	val = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_INT_STATUS);
	amd_spi_writereg16(amd_spi, AMD_SPI_HID2_INT_STATUS, val);
}
/*
 * amd_spi_mem_data_in() - read data for a spi-mem op, via HID2 DMA when
 * available (HID2 hardware + read opcode) or the index-mode FIFO otherwise.
 */
static void amd_spi_mem_data_in(struct amd_spi *amd_spi,
				const struct spi_mem_op *op)
{
	int base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes;
	u64 *buf_64 = (u64 *)op->data.buf.in;
	u32 nbytes = op->data.nbytes;
	u32 left_data = nbytes;
	u32 data;
	u8 *buf;
	int i;

	/*
	 * Condition for using HID read mode. Only for reading complete page data, use HID read.
	 * Use index mode otherwise.
	 */
	if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode)) {
		amd_spi_hiddma_read(amd_spi, op);

		/* Copy out of the DMA buffer: 8 bytes at a time, then the tail. */
		for (i = 0; left_data >= 8; i++, left_data -= 8)
			*buf_64++ = readq((u8 __iomem *)amd_spi->dma_virt_addr + (i * 8));

		buf = (u8 *)buf_64;
		for (i = 0; i < left_data; i++)
			buf[i] = readb((u8 __iomem *)amd_spi->dma_virt_addr +
				       (nbytes - left_data + i));

		/* Reset HID RX memory logic */
		data = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL);
		amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, data | BIT(5));
	} else {
		/* Index mode */
		amd_spi_set_opcode(amd_spi, op->cmd.opcode);
		amd_spi_set_addr(amd_spi, op);
		amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->dummy.nbytes);

		/* Fill dummy-cycle slots in the FIFO with 0xff. */
		for (i = 0; i < op->dummy.nbytes; i++)
			amd_spi_writereg8(amd_spi, (base_addr + i), 0xff);

		amd_spi_set_rx_count(amd_spi, op->data.nbytes);
		amd_spi_clear_fifo_ptr(amd_spi);
		amd_spi_execute_opcode(amd_spi);
		amd_spi_busy_wait(amd_spi);

		/* Drain the FIFO: 8 bytes at a time, then the tail. */
		for (i = 0; left_data >= 8; i++, left_data -= 8)
			*buf_64++ = amd_spi_readreg64(amd_spi, base_addr + op->dummy.nbytes +
						      (i * 8));

		buf = (u8 *)buf_64;
		for (i = 0; i < left_data; i++)
			buf[i] = amd_spi_readreg8(amd_spi, base_addr + op->dummy.nbytes +
						  nbytes + i - left_data);
	}
}
/* Enable or disable 4-byte addressing depending on the command opcode. */
static void amd_set_spi_addr_mode(struct amd_spi *amd_spi,
				  const struct spi_mem_op *op)
{
	u32 val = amd_spi_readreg32(amd_spi, AMD_SPI_ADDR32CTRL_REG);

	if (amd_is_spi_read_cmd_4b(op->cmd.opcode))
		val |= BIT(0);
	else
		val &= ~BIT(0);

	amd_spi_writereg32(amd_spi, AMD_SPI_ADDR32CTRL_REG, val);
}
/* spi-mem ->exec_op() callback: set the clock, then dispatch on data direction. */
static int amd_spi_exec_mem_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct amd_spi *amd_spi;
	int ret;

	amd_spi = spi_controller_get_devdata(mem->spi->controller);

	ret = amd_set_spi_freq(amd_spi, mem->spi->max_speed_hz);
	if (ret)
		return ret;

	/* The 4-byte address mode register is only programmed on V2 hardware. */
	if (amd_spi->version == AMD_SPI_V2)
		amd_set_spi_addr_mode(amd_spi, op);

	switch (op->data.dir) {
	case SPI_MEM_DATA_IN:
		amd_spi_mem_data_in(amd_spi, op);
		break;
	case SPI_MEM_DATA_OUT:
		fallthrough;
	case SPI_MEM_NO_DATA:
		amd_spi_mem_data_out(amd_spi, op);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
/* spi-mem operations exported to the SPI core. */
static const struct spi_controller_mem_ops amd_spi_mem_ops = {
	.exec_op = amd_spi_exec_mem_op,
	.adjust_op_size = amd_spi_adjust_op_size,
	.supports_op = amd_spi_supports_op,
};
/* ->transfer_one_message() callback: select the chip and run the message. */
static int amd_spi_host_transfer(struct spi_controller *host,
				 struct spi_message *msg)
{
	struct amd_spi *amd_spi = spi_controller_get_devdata(host);
	struct spi_device *spi = msg->spi;

	amd_spi_select_chip(amd_spi, spi_get_chipselect(spi, 0));

	/*
	 * Extract spi_transfers from the spi message and
	 * program the controller.
	 */
	return amd_spi_fifo_xfer(amd_spi, host, msg);
}
/* Per-transfer (and per-message) size cap: the index-mode FIFO depth. */
static size_t amd_spi_max_transfer_size(struct spi_device *spi)
{
	return AMD_SPI_FIFO_SIZE;
}
/*
 * amd_spi_setup_hiddma() - one-time HID2 DMA read path setup: allocate the
 * coherent DMA buffer and configure interrupt mask / buffer unit.
 * Returns 0 on success or -ENOMEM if the buffer allocation fails.
 */
static int amd_spi_setup_hiddma(struct amd_spi *amd_spi, struct device *dev)
{
	u32 hid_regval;

	/* Allocate DMA buffer to use for HID basic read operation */
	amd_spi->dma_virt_addr = dma_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE,
						    &amd_spi->phy_dma_buf, GFP_KERNEL);
	if (!amd_spi->dma_virt_addr)
		return -ENOMEM;

	/*
	 * Enable interrupts and set mask bits in hid2_int_mask register to generate interrupt
	 * properly for HIDDMA basic read operations.
	 */
	hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_INT_MASK);
	hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(19);
	amd_spi_writereg32(amd_spi, AMD_SPI_HID2_INT_MASK, hid_regval);

	/* Configure buffer unit(4k) in hid2_control register */
	hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL);
	amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, hid_regval & ~BIT(3));

	return 0;
}
/* Probe: map registers, populate the spi_controller, and register it. */
static int amd_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct amd_spi *amd_spi;
	int err;

	/* Allocate storage for host and driver private data */
	host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
	if (!host)
		return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");

	amd_spi = spi_controller_get_devdata(host);
	amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(amd_spi->io_remap_addr))
		return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
				     "ioremap of SPI registers failed\n");
	dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);

	/* Hardware version comes from the ACPI match table driver_data. */
	amd_spi->version = (uintptr_t) device_get_match_data(dev);

	/* Initialize the spi_controller fields */
	host->bus_num = (amd_spi->version == AMD_HID2_SPI) ? 2 : 0;
	host->num_chipselect = 4;
	host->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD;
	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->max_speed_hz = AMD_SPI_MAX_HZ;
	host->min_speed_hz = AMD_SPI_MIN_HZ;
	host->setup = amd_spi_host_setup;
	host->transfer_one_message = amd_spi_host_transfer;
	host->mem_ops = &amd_spi_mem_ops;
	host->max_transfer_size = amd_spi_max_transfer_size;
	host->max_message_size = amd_spi_max_transfer_size;

	/* Register the controller with SPI framework */
	err = devm_spi_register_controller(dev, host);
	if (err)
		return dev_err_probe(dev, err, "error registering SPI controller\n");

	/* HID2 controllers additionally need the DMA read path set up. */
	if (amd_spi->version == AMD_HID2_SPI)
		err = amd_spi_setup_hiddma(amd_spi, dev);

	return err;
}
#ifdef CONFIG_ACPI
/* ACPI IDs; driver_data carries the enum amd_spi_versions value. */
static const struct acpi_device_id spi_acpi_match[] = {
	{ "AMDI0061", AMD_SPI_V1 },
	{ "AMDI0062", AMD_SPI_V2 },
	{ "AMDI0063", AMD_HID2_SPI },
	{},
};
MODULE_DEVICE_TABLE(acpi, spi_acpi_match);
#endif
/* Platform driver glue; matched via ACPI only. */
static struct platform_driver amd_spi_driver = {
	.driver = {
		.name = "amd_spi",
		.acpi_match_table = ACPI_PTR(spi_acpi_match),
	},
	.probe = amd_spi_probe,
};
module_platform_driver(amd_spi_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Sanjay Mehta <sanju.mehta@amd.com>");
MODULE_DESCRIPTION("AMD SPI Master Controller Driver");
|