/*
* Copyright 2012-2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DC_INTERFACE_H_
#define DC_INTERFACE_H_
#include "dc_types.h"
#include "dc_state.h"
#include "dc_plane.h"
#include "grph_object_defs.h"
#include "logger_types.h"
#include "hdcp_msg_types.h"
#include "gpio_types.h"
#include "link_service_types.h"
#include "grph_object_ctrl_defs.h"
#include <inc/hw/opp.h>
#include "hwss/hw_sequencer.h"
#include "inc/compressor.h"
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
#include "dml2/dml2_wrapper.h"
#include "dmub/inc/dmub_cmd.h"
#include "spl/dc_spl_types.h"
struct abm_save_restore;
/* forward declaration */
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.310"
#define MAX_SURFACES 3
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
#define MAX_HOST_ROUTERS_NUM 2
/* Display Core Interfaces */
struct dc_versions {
const char *dc_ver;
struct dmcu_version dmcu_version;
};
enum dp_protocol_version {
DP_VERSION_1_4 = 0,
DP_VERSION_2_1,
DP_VERSION_UNKNOWN,
};
enum dc_plane_type {
DC_PLANE_TYPE_INVALID,
DC_PLANE_TYPE_DCE_RGB,
DC_PLANE_TYPE_DCE_UNDERLAY,
DC_PLANE_TYPE_DCN_UNIVERSAL,
};
// Sizes defined as multiples of 64KB
enum det_size {
DET_SIZE_DEFAULT = 0,
DET_SIZE_192KB = 3,
DET_SIZE_256KB = 4,
DET_SIZE_320KB = 5,
DET_SIZE_384KB = 6
};
struct dc_plane_cap {
enum dc_plane_type type;
uint32_t per_pixel_alpha : 1;
struct {
uint32_t argb8888 : 1;
uint32_t nv12 : 1;
uint32_t fp16 : 1;
uint32_t p010 : 1;
uint32_t ayuv : 1;
} pixel_format_support;
// max upscaling factor x1000
// upscaling factors are always >= 1
// for example, 1080p -> 8K is 4.0, or 4000 raw value
struct {
uint32_t argb8888;
uint32_t nv12;
uint32_t fp16;
} max_upscale_factor;
// max downscale factor x1000
// downscale factors are always <= 1
// for example, 8K -> 1080p is 0.25, or 250 raw value
struct {
uint32_t argb8888;
uint32_t nv12;
uint32_t fp16;
} max_downscale_factor;
// minimal width/height
uint32_t min_width;
uint32_t min_height;
};
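/*
 * Illustrative sketch (not part of the interface): one way a DM could check a
 * requested ARGB8888 scaling ratio against these caps, using the x1000
 * fixed-point convention described above ("ratio_x1000" and "ok" are
 * hypothetical names):
 *
 *	uint32_t ratio_x1000 = dst_width * 1000 / src_width;
 *	bool ok;
 *
 *	if (ratio_x1000 >= 1000)	// upscale or 1:1
 *		ok = ratio_x1000 <= cap->max_upscale_factor.argb8888;
 *	else				// downscale
 *		ok = ratio_x1000 >= cap->max_downscale_factor.argb8888;
 */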
/**
* DOC: color-management-caps
*
* **Color management caps (DPP and MPC)**
*
* Modules/color calculates various color operations which are translated to
* abstracted HW. DCE 5-12 had almost no important changes, but starting with
 * DCN1, every new generation comes with fairly major differences in the color
 * pipeline. Therefore, we abstract color pipe capabilities so modules/DM can
 * decide the mapping to a HW block based on logical capabilities.
*/
/**
* struct rom_curve_caps - predefined transfer function caps for degamma and regamma
* @srgb: RGB color space transfer func
* @bt2020: BT.2020 transfer func
* @gamma2_2: standard gamma
* @pq: perceptual quantizer transfer function
* @hlg: hybrid log–gamma transfer function
*/
struct rom_curve_caps {
uint16_t srgb : 1;
uint16_t bt2020 : 1;
uint16_t gamma2_2 : 1;
uint16_t pq : 1;
uint16_t hlg : 1;
};
/**
* struct dpp_color_caps - color pipeline capabilities for display pipe and
* plane blocks
*
* @dcn_arch: all DCE generations treated the same
 * @input_lut_shared: shared with DGAM. The input LUT is different from most
 * LUTs; it is just a plain 256-entry lookup
* @icsc: input color space conversion
* @dgam_ram: programmable degamma LUT
* @post_csc: post color space conversion, before gamut remap
* @gamma_corr: degamma correction
* @hw_3d_lut: 3D LUT support. It implies a shaper LUT before. It may be shared
* with MPC by setting mpc:shared_3d_lut flag
* @ogam_ram: programmable out/blend gamma LUT
* @ocsc: output color space conversion
* @dgam_rom_for_yuv: pre-defined degamma LUT for YUV planes
 * @dgam_rom_caps: pre-defined curve caps for degamma 1D LUT
 * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
*
* Note: hdr_mult and gamut remap (CTM) are always available in DPP (in that order)
*/
struct dpp_color_caps {
uint16_t dcn_arch : 1;
uint16_t input_lut_shared : 1;
uint16_t icsc : 1;
uint16_t dgam_ram : 1;
uint16_t post_csc : 1;
uint16_t gamma_corr : 1;
uint16_t hw_3d_lut : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t dgam_rom_for_yuv : 1;
struct rom_curve_caps dgam_rom_caps;
struct rom_curve_caps ogam_rom_caps;
};
/**
* struct mpc_color_caps - color pipeline capabilities for multiple pipe and
* plane combined blocks
*
* @gamut_remap: color transformation matrix
* @ogam_ram: programmable out gamma LUT
* @ocsc: output color space conversion matrix
* @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
 * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
*/
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t num_3dluts : 3;
uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
};
/**
* struct dc_color_caps - color pipes capabilities for DPP and MPC hw blocks
* @dpp: color pipes caps for DPP
* @mpc: color pipes caps for MPC
*/
struct dc_color_caps {
struct dpp_color_caps dpp;
struct mpc_color_caps mpc;
};
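/*
 * Illustrative sketch (not part of the interface): the caps above are meant to
 * let a DM or modules/color pick a HW block for a color operation instead of
 * assuming a fixed pipeline. For example, placement of a 3D LUT could be
 * decided roughly like this (simplified):
 *
 *	if (dc->caps.color.dpp.hw_3d_lut)
 *		// program shaper + 3D LUT in the DPP block
 *	else if (dc->caps.color.mpc.num_3dluts > 0)
 *		// program shaper + 3D LUT in the MPC block
 *	else
 *		// no HW 3D LUT available; fall back to 1D LUTs/CTM only
 */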
struct dc_dmub_caps {
bool psr;
bool mclk_sw;
bool subvp_psr;
bool gecc_enable;
uint8_t fams_ver;
bool aux_backlight_support;
};
struct dc_scl_caps {
bool sharpener_support;
};
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
uint32_t max_audios;
uint32_t max_slave_planes;
uint32_t max_slave_yuv_planes;
uint32_t max_slave_rgb_planes;
uint32_t max_planes;
uint32_t max_downscale_ratio;
uint32_t i2c_speed_in_khz;
uint32_t i2c_speed_in_khz_hdcp;
uint32_t dmdata_alloc_size;
unsigned int max_cursor_size;
unsigned int max_video_width;
/*
 * max video plane width that can be safely assumed to be always
 * supported by a single DPP pipe.
*/
unsigned int max_optimizable_video_width;
unsigned int min_horizontal_blanking_period;
int linear_pitch_alignment;
bool dcc_const_color;
bool dynamic_audio;
bool is_apu;
bool dual_link_dvi;
bool post_blend_color_processing;
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
bool dmcub_support;
bool zstate_support;
bool ips_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
unsigned int mall_size_total;
unsigned int cursor_cache_size;
struct dc_plane_cap planes[MAX_PLANES];
struct dc_color_caps color;
struct dc_dmub_caps dmub_caps;
bool dp_hpo;
bool dp_hdmi21_pcon_support;
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
uint32_t max_otg_num;
uint32_t max_cab_allocation_bytes;
uint32_t cache_line_size;
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
uint8_t subvp_drr_max_vblank_margin_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
uint32_t max_v_total;
uint32_t max_disp_clock_khz_at_vmin;
uint8_t subvp_drr_vblank_start_margin_us;
bool cursor_not_scaled;
bool dcmode_power_limits_present;
bool sequential_ono;
/* Conservative limit for DCC cases which require ODM 4:1 to be supported */
uint32_t dcc_plane_width_limit;
struct dc_scl_caps scl_caps;
};
struct dc_bug_wa {
bool no_connect_phy_config;
bool dedcn20_305_wa;
bool skip_clock_update;
bool lt_early_cr_pattern;
struct {
uint8_t uclk : 1;
uint8_t fclk : 1;
uint8_t dcfclk : 1;
uint8_t dcfclk_ds: 1;
} clock_update_disable_mask;
bool skip_psr_ips_crtc_disable;
};
struct dc_dcc_surface_param {
struct dc_size surface_size;
enum surface_pixel_format format;
unsigned int plane0_pitch;
struct dc_size plane1_size;
unsigned int plane1_pitch;
union {
enum swizzle_mode_values swizzle_mode;
enum swizzle_mode_addr3_values swizzle_mode_addr3;
};
enum dc_scan_direction scan;
};
struct dc_dcc_setting {
unsigned int max_compressed_blk_size;
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
// These bitfields are to be used starting with DCN 3.0
struct {
uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
uint32_t dcc_256_256 : 1; //available in ASICs starting with DCN 4.0x (the best compression case)
uint32_t dcc_256_128 : 1; //available in ASICs starting with DCN 4.0x
uint32_t dcc_256_64 : 1; //available in ASICs starting with DCN 4.0x (the worst compression case)
} dcc_controls;
};
struct dc_surface_dcc_cap {
union {
struct {
struct dc_dcc_setting rgb;
} grph;
struct {
struct dc_dcc_setting luma;
struct dc_dcc_setting chroma;
} video;
};
bool capable;
bool const_color_support;
};
struct dc_static_screen_params {
struct {
bool force_trigger;
bool cursor_update;
bool surface_update;
bool overlay_update;
} triggers;
unsigned int num_frames;
};
/* Surface update type is used by dc_update_surfaces_and_stream.
 * The update type is determined at the very beginning of the function based
 * on parameters passed in and decides how much programming (or updating) is
 * going to be done during the call.
 *
 * UPDATE_TYPE_FAST is used for really fast updates that do not require much
 * logical calculation or hardware register programming. This update MUST be
 * ISR safe on Windows. Currently a fast update is only used to flip the
 * surface address.
 *
 * UPDATE_TYPE_MED is used for slower updates which require significant hw
 * re-programming but do not affect bandwidth consumption or clock
 * requirements. At present, this is the level at which front end updates
 * that do not require us to run bw_calcs happen. These are in/out transfer
 * func updates, viewport offset changes, recout size changes and pixel depth
 * changes. This update can be done at ISR, but we want to minimize how often
 * this happens.
 *
 * UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
 * bandwidth and clocks, possibly rearrange some pipes and reprogram anything
 * front end related. Any time viewport dimensions, recout dimensions, scaling
 * ratios or gamma need to be adjusted, or a pipe needs to be turned on (or
 * disconnected), we do a full update. This cannot be done at ISR level and
 * should be a rare event. Unless someone is stress testing mpo enter/exit,
 * playing with colour or adjusting underscan, we don't expect to see this
 * call at all.
 */
enum surface_update_type {
UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
UPDATE_TYPE_FULL, /* may need to shuffle resources */
};
/* Forward declaration*/
struct dc;
struct dc_plane_state;
struct dc_state;
struct dc_cap_funcs {
bool (*get_dcc_compression_cap)(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output);
bool (*get_subvp_en)(struct dc *dc, struct dc_state *context);
};
struct link_training_settings;
union allow_lttpr_non_transparent_mode {
struct {
bool DP1_4A : 1;
bool DP2_0 : 1;
} bits;
unsigned char raw;
};
/* Structure to hold configuration flags set by dm at dc creation. */
struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
bool fbc_support;
bool disable_fractional_pwm;
bool allow_seamless_boot_optimization;
bool seamless_boot_edp_requested;
bool edp_not_connected;
bool edp_no_power_sequencing;
bool force_enum_edp;
bool forced_clocks;
union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode;
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
bool enable_windowed_mpo_odm;
bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520
uint32_t allow_edp_hotplug_detection;
bool clamp_min_dcfclk;
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
bool is_vmin_only_asic;
bool use_spl;
bool prefer_easf;
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
bool force_bios_enable_lttpr;
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
bool dc_mode_clk_limit_support;
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
bool usb4_bw_alloc_support;
bool allow_0_dtb_clk;
bool use_assr_psp_message;
bool support_edp0_on_dp1;
unsigned int enable_fpo_flicker_detection;
bool disable_hbr_audio_dp2;
bool consolidated_dpia_dp_lt;
bool set_pipe_unlock_order;
};
enum visual_confirm {
VISUAL_CONFIRM_DISABLE = 0,
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
VISUAL_CONFIRM_MPCTREE = 4,
VISUAL_CONFIRM_PSR = 5,
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
VISUAL_CONFIRM_FAMS2 = 19,
VISUAL_CONFIRM_HW_CURSOR = 20,
};
enum dc_psr_power_opts {
psr_power_opt_invalid = 0x0,
psr_power_opt_smu_opt_static_screen = 0x1,
psr_power_opt_z10_static_screen = 0x10,
psr_power_opt_ds_disable_allow = 0x100,
};
enum dml_hostvm_override_opts {
DML_HOSTVM_NO_OVERRIDE = 0x0,
DML_HOSTVM_OVERRIDE_FALSE = 0x1,
DML_HOSTVM_OVERRIDE_TRUE = 0x2,
};
enum dc_replay_power_opts {
replay_power_opt_invalid = 0x0,
replay_power_opt_smu_opt_static_screen = 0x1,
replay_power_opt_z10_static_screen = 0x10,
};
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
DCC_HALF_REQ_DISALBE = 2,
};
enum in_game_fams_config {
INGAME_FAMS_SINGLE_DISP_ENABLE, // enable in-game fams
INGAME_FAMS_DISABLE, // disable in-game fams
INGAME_FAMS_MULTI_DISP_ENABLE, //enable in-game fams for multi-display
INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY, //enable in-game fams for multi-display only for clamped RR strategies
};
/**
* enum pipe_split_policy - Pipe split strategy supported by DCN
*
* This enum is used to define the pipe split policy supported by DCN. By
* default, DC favors MPC_SPLIT_DYNAMIC.
*/
enum pipe_split_policy {
/**
* @MPC_SPLIT_DYNAMIC: DC will automatically decide how to split the
* pipe in order to bring the best trade-off between performance and
* power consumption. This is the recommended option.
*/
MPC_SPLIT_DYNAMIC = 0,
/**
* @MPC_SPLIT_AVOID: Avoid pipe split, which means that DC will not
* try any sort of split optimization.
*/
MPC_SPLIT_AVOID = 1,
/**
* @MPC_SPLIT_AVOID_MULT_DISP: With this option, DC will only try to
* optimize the pipe utilization when using a single display; if the
* user connects to a second display, DC will avoid pipe split.
*/
MPC_SPLIT_AVOID_MULT_DISP = 2,
};
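/*
 * Illustrative sketch (not part of the interface): the policy is consumed
 * through dc_debug_options, so a DM that wants to rule out pipe split while
 * debugging could, for example, set
 *
 *	dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
 *
 * before building and validating a new dc_state. MPC_SPLIT_DYNAMIC remains
 * the recommended default.
 */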
enum wm_report_mode {
WM_REPORT_DEFAULT = 0,
WM_REPORT_OVERRIDE = 1,
};
enum dtm_pstate{
dtm_level_p0 = 0,/*highest voltage*/
dtm_level_p1,
dtm_level_p2,
dtm_level_p3,
dtm_level_p4,/*when active_display_count = 0*/
};
enum dcn_pwr_state {
DCN_PWR_STATE_UNKNOWN = -1,
DCN_PWR_STATE_MISSION_MODE = 0,
DCN_PWR_STATE_LOW_POWER = 3,
};
enum dcn_zstate_support_state {
DCN_ZSTATE_SUPPORT_UNKNOWN,
DCN_ZSTATE_SUPPORT_ALLOW,
DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY,
DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY,
DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
DCN_ZSTATE_SUPPORT_DISALLOW,
};
/*
* struct dc_clocks - DC pipe clocks
*
 * For any clocks that may differ per pipe, only the max is stored in this
 * structure.
*/
struct dc_clocks {
int dispclk_khz;
int actual_dispclk_khz;
int dppclk_khz;
int actual_dppclk_khz;
int disp_dpp_voltage_level_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
int fclk_khz;
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;
enum dcn_zstate_support_state zstate_support;
bool dtbclk_en;
int ref_dtbclk_khz;
bool fclk_p_state_change_support;
enum dcn_pwr_state pwr_state;
/*
* Elements below are not compared for the purposes of
* optimization required
*/
bool prev_p_state_change_support;
bool fclk_prev_p_state_change_support;
int num_ways;
int host_router_bw_kbps[MAX_HOST_ROUTERS_NUM];
/*
* @fw_based_mclk_switching
*
 * DC has a mechanism that leverages the variable refresh rate to switch
 * the memory clock in cases where there is a large latency to achieve
 * the memory clock change and a short vblank window. DC has some
 * requirements to enable this feature, and this field describes whether
 * the system supports it.
*/
bool fw_based_mclk_switching;
bool fw_based_mclk_switching_shut_down;
int prev_num_ways;
enum dtm_pstate dtm_level;
int max_supported_dppclk_khz;
int max_supported_dispclk_khz;
int bw_dppclk_khz; /*a copy of dppclk_khz*/
int bw_dispclk_khz;
int idle_dramclk_khz;
int idle_fclk_khz;
};
struct dc_bw_validation_profile {
bool enable;
unsigned long long total_ticks;
unsigned long long voltage_level_ticks;
unsigned long long watermark_ticks;
unsigned long long rq_dlg_ticks;
unsigned long long total_count;
unsigned long long skip_fast_count;
unsigned long long skip_pass_count;
unsigned long long skip_fail_count;
};
#define BW_VAL_TRACE_SETUP() \
unsigned long long end_tick = 0; \
unsigned long long voltage_level_tick = 0; \
unsigned long long watermark_tick = 0; \
unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \
dm_get_timestamp(dc->ctx) : 0
#define BW_VAL_TRACE_COUNT() \
if (dc->debug.bw_val_profile.enable) \
dc->debug.bw_val_profile.total_count++
#define BW_VAL_TRACE_SKIP(status) \
if (dc->debug.bw_val_profile.enable) { \
if (!voltage_level_tick) \
voltage_level_tick = dm_get_timestamp(dc->ctx); \
dc->debug.bw_val_profile.skip_ ## status ## _count++; \
}
#define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \
if (dc->debug.bw_val_profile.enable) \
voltage_level_tick = dm_get_timestamp(dc->ctx)
#define BW_VAL_TRACE_END_WATERMARKS() \
if (dc->debug.bw_val_profile.enable) \
watermark_tick = dm_get_timestamp(dc->ctx)
#define BW_VAL_TRACE_FINISH() \
if (dc->debug.bw_val_profile.enable) { \
end_tick = dm_get_timestamp(dc->ctx); \
dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \
dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \
if (watermark_tick) { \
dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \
dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \
} \
}
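/*
 * Illustrative sketch (not part of the interface): the BW_VAL_TRACE_* macros
 * above are intended to be used together inside a bandwidth validation
 * routine, roughly in this order (the function name, "early_out" and "result"
 * are hypothetical):
 *
 *	static bool validate_bandwidth(struct dc *dc, struct dc_state *context)
 *	{
 *		BW_VAL_TRACE_SETUP();
 *		bool result = false;
 *
 *		BW_VAL_TRACE_COUNT();
 *		if (early_out) {
 *			BW_VAL_TRACE_SKIP(fast);	// or pass/fail
 *			goto done;
 *		}
 *
 *		// ... voltage level / DML calculations ...
 *		BW_VAL_TRACE_END_VOLTAGE_LEVEL();
 *
 *		// ... watermark and rq/dlg parameter calculations ...
 *		BW_VAL_TRACE_END_WATERMARKS();
 *		result = true;
 *	done:
 *		BW_VAL_TRACE_FINISH();
 *		return result;
 *	}
 */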
union mem_low_power_enable_options {
struct {
bool vga: 1;
bool i2c: 1;
bool dmcu: 1;
bool dscl: 1;
bool cm: 1;
bool mpc: 1;
bool optc: 1;
bool vpg: 1;
bool afmt: 1;
} bits;
uint32_t u32All;
};
union root_clock_optimization_options {
struct {
bool dpp: 1;
bool dsc: 1;
bool hdmistream: 1;
bool hdmichar: 1;
bool dpstream: 1;
bool symclk32_se: 1;
bool symclk32_le: 1;
bool symclk_fe: 1;
bool physymclk: 1;
bool dpiasymclk: 1;
uint32_t reserved: 22;
} bits;
uint32_t u32All;
};
union fine_grain_clock_gating_enable_options {
struct {
bool dccg_global_fgcg_rep : 1; /* Global fine grain clock gating of repeaters */
bool dchub : 1; /* Display controller hub */
bool dchubbub : 1;
bool dpp : 1; /* Display pipes and planes */
bool opp : 1; /* Output pixel processing */
bool optc : 1; /* Output pipe timing combiner */
bool dio : 1; /* Display output */
bool dwb : 1; /* Display writeback */
bool mmhubbub : 1; /* Multimedia hub */
bool dmu : 1; /* Display core management unit */
bool az : 1; /* Azalia */
bool dchvm : 1;
bool dsc : 1; /* Display stream compression */
uint32_t reserved : 19;
} bits;
uint32_t u32All;
};
enum pg_hw_pipe_resources {
PG_HUBP = 0,
PG_DPP,
PG_DSC,
PG_MPCC,
PG_OPP,
PG_OPTC,
PG_DPSTREAM,
PG_HDMISTREAM,
PG_PHYSYMCLK,
PG_HW_PIPE_RESOURCES_NUM_ELEMENT
};
enum pg_hw_resources {
PG_DCCG = 0,
PG_DCIO,
PG_DIO,
PG_DCHUBBUB,
PG_DCHVM,
PG_DWB,
PG_HPO,
PG_HW_RESOURCES_NUM_ELEMENT
};
struct pg_block_update {
bool pg_pipe_res_update[PG_HW_PIPE_RESOURCES_NUM_ELEMENT][MAX_PIPES];
bool pg_res_update[PG_HW_RESOURCES_NUM_ELEMENT];
};
union dpia_debug_options {
struct {
uint32_t disable_dpia:1; /* bit 0 */
uint32_t force_non_lttpr:1; /* bit 1 */
uint32_t extend_aux_rd_interval:1; /* bit 2 */
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
uint32_t reserved:25;
} bits;
uint32_t raw;
};
/* AUX wake work around options
 * bit 0: enable/disable work around
 * bit 1: use default timeout LINK_AUX_WAKE_TIMEOUT_MS
 * bits 15-2: reserved
 * bits 31-16: timeout in ms
*/
union aux_wake_wa_options {
struct {
uint32_t enable_wa : 1;
uint32_t use_default_timeout : 1;
uint32_t rsvd: 14;
uint32_t timeout_ms : 16;
} bits;
uint32_t raw;
};
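/*
 * Illustrative sketch (not part of the interface): enabling the workaround
 * with an explicit timeout (the 40 ms value is only an example) would look
 * like:
 *
 *	union aux_wake_wa_options wa = {
 *		.bits = { .enable_wa = 1, .timeout_ms = 40 },
 *	};
 */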
struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
uint32_t auxErrorCount;
};
struct dc_phy_addr_space_config {
struct {
uint64_t start_addr;
uint64_t end_addr;
uint64_t fb_top;
uint64_t fb_offset;
uint64_t fb_base;
uint64_t agp_top;
uint64_t agp_bot;
uint64_t agp_base;
} system_aperture;
struct {
uint64_t page_table_start_addr;
uint64_t page_table_end_addr;
uint64_t page_table_base_addr;
bool base_addr_is_mc_addr;
} gart_config;
bool valid;
bool is_hvm_enabled;
uint64_t page_table_default_page_addr;
};
struct dc_virtual_addr_space_config {
uint64_t page_table_base_addr;
uint64_t page_table_start_addr;
uint64_t page_table_end_addr;
uint32_t page_table_block_size_in_bytes;
uint8_t page_table_depth; // 1 = 1 level, 2 = 2 levels, etc.; 0 = invalid
};
struct dc_bounding_box_overrides {
int sr_exit_time_ns;
int sr_enter_plus_exit_time_ns;
int sr_exit_z8_time_ns;
int sr_enter_plus_exit_z8_time_ns;
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
int dummy_clock_change_latency_ns;
int fclk_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
*/
int min_dcfclk_mhz;
};
struct dc_state;
struct resource_pool;
struct dce_hwseq;
struct link_service;
/*
* struct dc_debug_options - DC debug struct
*
* This struct provides a simple mechanism for developers to change some
* configurations, enable/disable features, and activate extra debug options.
* This can be very handy to narrow down whether some specific feature is
* causing an issue or not.
*/
struct dc_debug_options {
bool native422_support;
bool disable_dsc;
enum visual_confirm visual_confirm;
int visual_confirm_rect_height;
bool sanity_checks;
bool max_disp_clk;
bool surface_trace;
bool clock_trace;
bool validation_trace;
bool bandwidth_calcs_trace;
int max_downscale_src_width;
/* stutter efficiency related */
bool disable_stutter;
bool use_max_lb;
enum dcc_option disable_dcc;
/*
* @pipe_split_policy: Define which pipe split policy is used by the
* display core.
*/
enum pipe_split_policy pipe_split_policy;
bool force_single_disp_pipe_split;
bool voltage_align_fclk;
bool disable_min_fclk;
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
bool disable_hubp_power_gate;
bool disable_dsc_power_gate;
bool disable_optc_power_gate;
bool disable_hpo_power_gate;
int dsc_min_slice_height_override;
int dsc_bpp_increment_div;
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
unsigned int min_disp_clk_khz;
unsigned int min_dpp_clk_khz;
unsigned int min_dram_clk_khz;
int sr_exit_time_dpm0_ns;
int sr_enter_plus_exit_time_dpm0_ns;
int sr_exit_time_ns;
int sr_enter_plus_exit_time_ns;
int sr_exit_z8_time_ns;
int sr_enter_plus_exit_z8_time_ns;
int urgent_latency_ns;
uint32_t underflow_assert_delay_us;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
bool optimized_watermark;
int always_scale;
bool disable_pplib_clock_request;
bool disable_clock_gate;
bool disable_mem_low_power;
bool pstate_enabled;
bool disable_dmcu;
bool force_abm_enable;
bool disable_stereo_support;
bool vsr_support;
bool performance_trace;
bool az_endpoint_mute_only;
bool always_use_regamma;
bool recovery_enabled;
bool avoid_vbios_exec_table;
bool scl_reset_length10;
bool hdmi20_disable;
bool skip_detection_link_training;
uint32_t edid_read_retry_times;
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
int minimum_z8_residency_time;
int minimum_z10_residency_time;
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
bool ips_disallow_entry;
bool dmub_offload_enabled;
bool dmcub_emulation;
bool disable_idle_power_optimizations;
unsigned int mall_size_override;
unsigned int mall_additional_timer_percent;
bool mall_error_as_fatal;
bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
bool disable_fec;
bool disable_48mhz_pwrdwn;
/* This forces a hard min on the DCFCLK requested to SMU/PP;
 * watermarks are not affected.
*/
unsigned int force_min_dcfclk_mhz;
int dwb_fi_phase;
bool disable_timing_sync;
bool cm_in_bypass;
int force_clock_mode;/*every mode change.*/
bool disable_dram_clock_change_vactive_support;
bool validate_dml_output;
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
bool enable_dram_clock_change_one_display_vactive;
/* TODO - remove once tested */
bool legacy_dp2_lt;
bool set_mst_en_for_sst;
bool disable_uhbr;
bool force_dp2_lt_fallback_method;
bool ignore_cable_id;
union mem_low_power_enable_options enable_mem_low_power;
union root_clock_optimization_options root_clock_optimization;
union fine_grain_clock_gating_enable_options enable_fine_grain_clock_gating;
bool hpo_optimization;
bool force_vblank_alignment;
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool disable_fams;
enum in_game_fams_config disable_fams_gaming;
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
enum det_size crb_alloc_policy;
int crb_alloc_policy_min_disp_count;
bool disable_z10;
bool enable_z9_disable_interface;
bool psr_skip_crtc_disable;
uint32_t ips_skip_crtc_disable_mask;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
uint32_t fixed_vs_aux_delay_config_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
bool allow_sw_cursor_fallback;
unsigned int force_subvp_num_ways;
unsigned int force_mall_ss_num_ways;
bool alloc_extra_way_for_cursor;
uint32_t subvp_extra_lines;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
bool extended_blank_optimization;
union aux_wake_wa_options aux_wake_wa;
uint32_t mst_start_top_delay;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
bool dml_disallow_alternate_prefetch_modes;
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool using_dml2;
bool enable_single_display_2to1_odm_policy;
bool enable_double_buffered_dsc_pg_support;
bool enable_dp_dig_pixel_rate_div_policy;
bool using_dml21;
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
unsigned int min_prefetch_in_strobe_ns;
bool disable_unbounded_requesting;
bool dig_fifo_off_in_blank;
bool override_dispclk_programming;
bool otg_crc_db;
bool disallow_dispclk_dppclk_ds;
bool disable_fpo_optimizations;
bool support_eDP1_5;
uint32_t fpo_vactive_margin_us;
bool disable_fpo_vactive;
bool disable_boot_optimizations;
bool override_odm_optimization;
bool minimize_dispclk_using_odm;
bool disable_subvp_high_refresh;
bool disable_dp_plus_plus_wa;
uint32_t fpo_vactive_min_active_margin_us;
uint32_t fpo_vactive_max_blank_us;
bool enable_hpo_pg_support;
bool enable_legacy_fast_update;
bool disable_dc_mode_overwrite;
bool replay_skip_crtc_disabled;
bool ignore_pg;/*do nothing, let pmfw control it*/
bool psp_disabled_wa;
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
bool optimize_ips_handshake;
bool disable_dmub_reallow_idle;
bool disable_timeout;
bool disable_extblankadj;
bool enable_idle_reg_checks;
unsigned int static_screen_wait_frames;
uint32_t pwm_freq;
bool force_chroma_subsampling_1tap;
unsigned int dcc_meta_propagation_delay_us;
bool disable_422_left_edge_pixel;
bool dml21_force_pstate_method;
uint32_t dml21_force_pstate_method_values[MAX_PIPES];
uint32_t dml21_disable_pstate_method_mask;
union dmub_fams2_global_feature_config fams2_config;
bool enable_legacy_clock_update;
unsigned int force_cositing;
unsigned int disable_spl;
unsigned int force_easf;
unsigned int force_sharpness;
unsigned int force_sharpness_level;
unsigned int force_lls;
bool notify_dpia_hr_bw;
bool enable_ips_visual_confirm;
unsigned int sharpen_policy;
unsigned int scale_to_sharpness_policy;
bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
};
/* Generic structure that can be used to query properties of DC. More fields
* can be added as required.
*/
struct dc_current_properties {
unsigned int cursor_size_limit;
};
enum frame_buffer_mode {
FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
FRAME_BUFFER_MODE_ZFB_ONLY,
FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL,
} ;
struct dchub_init_data {
int64_t zfb_phys_addr_base;
int64_t zfb_mc_base_addr;
uint64_t zfb_size_in_byte;
enum frame_buffer_mode fb_mode;
bool dchub_initialzied;
bool dchub_info_valid;
};
struct dml2_soc_bb;
struct dc_init_data {
struct hw_asic_id asic_id;
void *driver; /* ctx */
struct cgs_device *cgs_device;
struct dc_bounding_box_overrides bb_overrides;
int num_virtual_links;
/*
 * If 'vbios_override' is not NULL, it will be used instead
 * of the real VBIOS. Intended use is diagnostics on FPGA.
*/
struct dc_bios *vbios_override;
enum dce_environment dce_environment;
struct dmub_offload_funcs *dmub_if;
struct dc_reg_helper_state *dmub_offload;
struct dc_config flags;
uint64_t log_mask;
struct dpcd_vendor_signature vendor_signature;
bool force_smu_not_present;
/*
 * IP offset for run-time initialization of register addresses
*
* DCN3.5+ will fail dc_create() if these fields are null for them. They are
* applicable starting with DCN32/321 and are not used for ASICs upstreamed
* before them.
*/
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
uint32_t *clk_reg_offsets;
struct dml2_soc_bb *bb_from_dmub;
};
struct dc_callback_init {
struct cp_psp cp_psp;
};
struct dc *dc_create(const struct dc_init_data *init_params);
void dc_hardware_init(struct dc *dc);
int dc_get_vmid_use_vector(struct dc *dc);
void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);
/* Returns the number of vmids supported */
int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_config);
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params);
void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
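/*
 * Illustrative sketch (not part of the interface): a create/teardown ordering
 * a DM might follow with the entry points above (init_data, callback and
 * pa_config setup, plus error handling, omitted):
 *
 *	struct dc *dc = dc_create(&init_data);
 *
 *	dc_hardware_init(dc);
 *	dc_init_callbacks(dc, &callback_init);
 *	// optional, when a GPU VM / system aperture is used:
 *	// dc_setup_system_context(dc, &pa_config);
 *
 *	// ... runtime: validation, stream commits, surface updates ...
 *
 *	dc_deinit_callbacks(dc);
 *	dc_destroy(&dc);
 */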
/* Surface Interfaces */
enum {
TRANSFER_FUNC_POINTS = 1025
};
struct dc_hdr_static_metadata {
/* display chromaticities and white point in units of 0.00001 */
unsigned int chromaticity_green_x;
unsigned int chromaticity_green_y;
unsigned int chromaticity_blue_x;
unsigned int chromaticity_blue_y;
unsigned int chromaticity_red_x;
unsigned int chromaticity_red_y;
unsigned int chromaticity_white_point_x;
unsigned int chromaticity_white_point_y;
uint32_t min_luminance;
uint32_t max_luminance;
uint32_t maximum_content_light_level;
uint32_t maximum_frame_average_light_level;
};
enum dc_transfer_func_type {
TF_TYPE_PREDEFINED,
TF_TYPE_DISTRIBUTED_POINTS,
TF_TYPE_BYPASS,
TF_TYPE_HWPWL
};
struct dc_transfer_func_distributed_points {
struct fixed31_32 red[TRANSFER_FUNC_POINTS];
struct fixed31_32 green[TRANSFER_FUNC_POINTS];
struct fixed31_32 blue[TRANSFER_FUNC_POINTS];
uint16_t end_exponent;
uint16_t x_point_at_y1_red;
uint16_t x_point_at_y1_green;
uint16_t x_point_at_y1_blue;
};
enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_SRGB,
TRANSFER_FUNCTION_BT709,
TRANSFER_FUNCTION_PQ,
TRANSFER_FUNCTION_LINEAR,
TRANSFER_FUNCTION_UNITY,
TRANSFER_FUNCTION_HLG,
TRANSFER_FUNCTION_HLG12,
TRANSFER_FUNCTION_GAMMA22,
TRANSFER_FUNCTION_GAMMA24,
TRANSFER_FUNCTION_GAMMA26
};
struct dc_transfer_func {
struct kref refcount;
enum dc_transfer_func_type type;
enum dc_transfer_func_predefined tf;
/* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
uint32_t sdr_ref_white_level;
union {
struct pwl_params pwl;
struct dc_transfer_func_distributed_points tf_pts;
};
};
union dc_3dlut_state {
struct {
uint32_t initialized:1; /* set if the 3D LUT went through the color module for initialization */
uint32_t rmu_idx_valid:1; /* if mux settings are valid */
uint32_t rmu_mux_num:3; /* index of mux to use */
uint32_t mpc_rmu0_mux:4; /* select mpcc on mux, one of the following: mpcc0, mpcc1, mpcc2, mpcc3 */
uint32_t mpc_rmu1_mux:4;
uint32_t mpc_rmu2_mux:4;
uint32_t reserved:15;
} bits;
uint32_t raw;
};
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
struct fixed31_32 hdr_multiplier;
union dc_3dlut_state state;
};
/*
* This structure is filled in by dc_surface_get_status and contains
 * the last requested address and the currently active address so the caller
 * can determine if there are any outstanding flips.
*/
struct dc_plane_status {
struct dc_plane_address requested_address;
struct dc_plane_address current_address;
bool is_flip_pending;
bool is_right_eye;
};
union surface_update_flags {
struct {
uint32_t addr_update:1;
/* Medium updates */
uint32_t dcc_change:1;
uint32_t color_space_change:1;
uint32_t horizontal_mirror_change:1;
uint32_t per_pixel_alpha_change:1;
uint32_t global_alpha_change:1;
uint32_t hdr_mult:1;
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
uint32_t coeff_reduction_change:1;
uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
uint32_t gamut_remap_change:1;
/* Full updates */
uint32_t new_plane:1;
uint32_t bpp_change:1;
uint32_t gamma_change:1;
uint32_t bandwidth_change:1;
uint32_t clock_change:1;
uint32_t stereo_format_change:1;
uint32_t lut_3d:1;
uint32_t tmz_changed:1;
uint32_t mcm_transfer_function_enable_change:1; /* disable or enable MCM transfer func */
uint32_t full_update:1;
uint32_t sdr_white_level_nits:1;
} bits;
uint32_t raw;
};
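/*
 * Illustrative sketch (not part of the interface): the grouping of the bits
 * above loosely mirrors enum surface_update_type, e.g. a classification could
 * look like this (hypothetical, simplified):
 *
 *	if (flags.bits.full_update || flags.bits.new_plane ||
 *	    flags.bits.bandwidth_change)
 *		type = UPDATE_TYPE_FULL;
 *	else if (flags.bits.scaling_change || flags.bits.in_transfer_func_change)
 *		type = UPDATE_TYPE_MED;
 *	else if (flags.bits.addr_update)
 *		type = UPDATE_TYPE_FAST;
 */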
#define DC_REMOVE_PLANE_POINTERS 1
struct dc_plane_state {
struct dc_plane_address address;
struct dc_plane_flip_time time;
bool triplebuffer_flips;
struct scaling_taps scaling_quality;
struct rect src_rect;
struct rect dst_rect;
struct rect clip_rect;
struct plane_size plane_size;
union dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
struct dc_gamma gamma_correction;
struct dc_transfer_func in_transfer_func;
struct dc_bias_and_scale bias_and_scale;
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
struct fixed31_32 hdr_mult;
struct colorspace_transform gamut_remap_matrix;
// TODO: No longer used, remove
struct dc_hdr_static_metadata hdr_static_ctx;
enum dc_color_space color_space;
struct dc_3dlut lut3d_func;
struct dc_transfer_func in_shaper_func;
struct dc_transfer_func blend_tf;
struct dc_transfer_func *gamcor_tf;
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
bool is_tiling_rotated;
bool per_pixel_alpha;
bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool visible;
bool flip_immediate;
bool horizontal_mirror;
int layer_index;
union surface_update_flags update_flags;
bool flip_int_enabled;
bool skip_manual_trigger;
/* private to DC core */
struct dc_plane_status status;
struct dc_context *ctx;
/* HACK: Workaround for forcing full reprogramming under some conditions */
bool force_full_update;
bool is_phantom; // TODO: Change mall_stream_config into mall_plane_config instead
/* private to dc_surface.c */
enum dc_irq_source irq_source;
struct kref refcount;
struct tg_color visual_confirm_color;
bool is_statically_allocated;
enum chroma_cositing cositing;
enum dc_cm2_shaper_3dlut_setting mcm_shaper_3dlut_setting;
bool mcm_lut1d_enable;
struct dc_cm2_func_luts mcm_luts;
bool lut_bank_a;
enum mpcc_movable_cm_location mcm_location;
struct dc_csc_transform cursor_csc_color_matrix;
bool adaptive_sharpness_en;
int adaptive_sharpness_policy;
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
unsigned int sdr_white_level_nits;
};
struct dc_plane_info {
struct plane_size plane_size;
union dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
enum dc_color_space color_space;
bool horizontal_mirror;
bool visible;
bool per_pixel_alpha;
bool pre_multiplied_alpha;
bool global_alpha;
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
enum chroma_cositing cositing;
};
#include "dc_stream.h"
struct dc_scratch_space {
/* Used to temporarily back up the plane states of a stream during
 * a dc update. The reason is that plane states are overwritten
 * by surface updates in dc update. Once they are overwritten, the
 * current state is no longer valid. We want to temporarily
 * store the current values in plane states so we can still recover
 * a valid current state during the dc update.
*/
struct dc_plane_state plane_states[MAX_SURFACE_NUM];
struct dc_stream_state stream_state;
};
struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
struct dc_caps caps;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_bounding_box_overrides bb_overrides;
struct dc_bug_wa work_arounds;
struct dc_context *ctx;
struct dc_phy_addr_space_config vm_pa_config;
uint8_t link_count;
struct dc_link *links[MAX_LINKS];
struct link_service *link_srv;
struct dc_state *current_state;
struct resource_pool *res_pool;
struct clk_mgr *clk_mgr;
/* Display Engine Clock levels */
struct dm_pp_clock_levels sclk_lvls;
/* Inputs into BW and WM calculations. */
struct bw_calcs_dceip *bw_dceip;
struct bw_calcs_vbios *bw_vbios;
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
struct display_mode_lib dml;
/* HW functions */
struct hw_sequencer_funcs hwss;
struct dce_hwseq *hwseq;
/* Required to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
/* Required to maintain clocks and bandwidth for UEFI enabled HW */
/* FBC compressor */
struct compressor *fbc_compressor;
struct dc_debug_data debug_data;
struct dpcd_vendor_signature vendor_signature;
const char *build_id;
struct vm_helper *vm_helper;
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
uint32_t *clk_reg_offsets;
/* Scratch memory */
struct {
struct {
/*
* For matching clock_limits table in driver with table
* from PMFW.
*/
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
} update_bw_bounding_box;
struct dc_scratch_space current_state;
struct dc_scratch_space new_state;
struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */
} scratch;
struct dml2_configuration_options dml2_options;
struct dml2_configuration_options dml2_tmp;
enum dc_acpi_cm_power_state power_state;
};
struct dc_scaling_info {
struct rect src_rect;
struct rect dst_rect;
struct rect clip_rect;
struct scaling_taps scaling_quality;
};
struct dc_fast_update {
const struct dc_flip_addrs *flip_addr;
const struct dc_gamma *gamma;
const struct colorspace_transform *gamut_remap_matrix;
const struct dc_csc_transform *input_csc_color_matrix;
const struct fixed31_32 *coeff_reduction_factor;
struct dc_transfer_func *out_transfer_func;
struct dc_csc_transform *output_csc_transform;
const struct dc_csc_transform *cursor_csc_color_matrix;
};
struct dc_surface_update {
struct dc_plane_state *surface;
/* isr safe update parameters. null means no updates */
const struct dc_flip_addrs *flip_addr;
const struct dc_plane_info *plane_info;
const struct dc_scaling_info *scaling_info;
struct fixed31_32 hdr_mult;
/* following updates require alloc/sleep/spin that is not isr safe,
* null means no updates
*/
const struct dc_gamma *gamma;
const struct dc_transfer_func *in_transfer_func;
const struct dc_csc_transform *input_csc_color_matrix;
const struct fixed31_32 *coeff_reduction_factor;
const struct dc_transfer_func *func_shaper;
const struct dc_3dlut *lut3d_func;
const struct dc_transfer_func *blend_tf;
const struct colorspace_transform *gamut_remap_matrix;
/*
* Color Transformations for pre-blend MCM (Shaper, 3DLUT, 1DLUT)
*
* change cm2_params.component_settings: Full update
* change cm2_params.cm2_luts: Fast update
*/
const struct dc_cm2_parameters *cm2_params;
const struct dc_csc_transform *cursor_csc_color_matrix;
unsigned int sdr_white_level_nits;
};
/*
* Create a new surface with default parameters;
*/
void dc_gamma_retain(struct dc_gamma *dc_gamma);
void dc_gamma_release(struct dc_gamma **dc_gamma);
struct dc_gamma *dc_create_gamma(void);
void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);
void dc_transfer_func_release(struct dc_transfer_func *dc_tf);
struct dc_transfer_func *dc_create_transfer_func(void);
struct dc_3dlut *dc_create_3dlut_func(void);
void dc_3dlut_func_release(struct dc_3dlut *lut);
void dc_3dlut_func_retain(struct dc_3dlut *lut);
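/*
 * Illustrative sketch (not part of the interface): the gamma/transfer func/
 * 3D LUT objects above are reference counted, so each create (or retain) is
 * normally paired with a release, e.g.:
 *
 *	struct dc_transfer_func *tf = dc_create_transfer_func();
 *
 *	if (tf) {
 *		tf->type = TF_TYPE_PREDEFINED;
 *		tf->tf = TRANSFER_FUNCTION_SRGB;
 *		// ... use it in a surface/stream update ...
 *		dc_transfer_func_release(tf);
 *	}
 */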
void dc_post_update_surfaces_to_stream(
struct dc *dc);
#include "dc_stream.h"
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
struct dc_validation_set {
/**
* @stream: Stream state properties
*/
struct dc_stream_state *stream;
/**
* @plane_states: Surface state
*/
struct dc_plane_state *plane_states[MAX_SURFACES];
/**
* @plane_count: Total of active planes
*/
uint8_t plane_count;
};
bool dc_validate_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing);
enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
bool fast_validate);
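/*
 * Illustrative sketch (not part of the interface): building a validation set
 * for one stream with a single plane and checking it against a context
 * ("stream", "plane" and "context" are assumed to exist already; DC_OK is
 * assumed to come from enum dc_status in dc_types.h):
 *
 *	struct dc_validation_set set = { 0 };
 *
 *	set.stream = stream;
 *	set.plane_states[0] = plane;
 *	set.plane_count = 1;
 *
 *	if (dc_validate_with_context(dc, &set, 1, context, false) != DC_OK)
 *		// reject the proposed configuration
 */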
bool dc_set_generic_gpio_for_stereo(bool enable,
struct gpio_service *gpio_service);
/*
* fast_validate: we return after determining if we can support the new state,
* but before we populate the programming info
*/
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
bool fast_validate);
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count);
void populate_fast_updates(struct dc_fast_update *fast_update,
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_update *stream_update);
/*
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
*
* After this call:
* Phy, Encoder, Timing Generator are programmed and enabled.
* New streams are enabled with blank stream; no memory read.
*/
enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params);
struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
struct dc_stream_state *stream,
int mpcc_inst);
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
void dc_set_disable_128b_132b_stream_overhead(bool disable);
/* The function returns minimum bandwidth required to drive a given timing
* return - minimum required timing bandwidth in kbps.
*/
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing,
const enum dc_link_encoding_format link_encoding);
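/*
 * Illustrative sketch (not part of the interface): comparing a timing's
 * minimum required bandwidth against some available link bandwidth
 * ("available_link_kbps" is hypothetical; the encoding value is assumed to
 * come from enum dc_link_encoding_format in dc_types.h):
 *
 *	uint32_t req_kbps = dc_bandwidth_in_kbps_from_timing(
 *			&stream->timing, DC_LINK_ENCODING_DP_8b_10b);
 *
 *	if (req_kbps > available_link_kbps)
 *		// this timing cannot be driven with that link configuration
 */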
/* Link Interfaces */
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
*/
struct dc_link {
struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
unsigned int sink_count;
struct dc_sink *local_sink;
unsigned int link_index;
enum dc_connection_type type;
enum signal_type connector_signal;
enum dc_irq_source irq_source_hpd;
enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
bool is_hpd_filter_disabled;
bool dp_ss_off;
/**
* @link_state_valid:
*
* If there is no link and local sink, this variable should be set to
* false. Otherwise, it should be set to true; usually, the function
* core_link_enable_stream sets this field to true.
*/
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
bool skip_stream_reenable;
bool is_internal_display;
/** @todo Rename. Flag an endpoint as having a programmable mapping to a DIG encoder. */
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
bool is_hpd_pending; /* Indicates a newly received hpd */
/* USB4 DPIA links skip verifying link cap, instead performing the fallback method
* for every link training. This is incompatible with DP LL compliance automation,
* which expects the same link settings to be used every retry on a link loss.
* This flag is used to skip the fallback when link loss occurs during automation.
*/
bool skip_fallback_on_link_loss;
bool edp_sink_present;
struct dp_trace dp_trace;
/* caps is the same as reported_link_cap. Link training uses
* reported_link_cap. Will clean up. TODO
*/
struct dc_link_settings reported_link_cap;
struct dc_link_settings verified_link_cap;
struct dc_link_settings cur_link_settings;
struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
struct dc_link_settings preferred_link_setting;
/* preferred_training_settings are override values that
* come from DM. DM is responsible for the memory
* management of the override pointers.
*/
struct dc_link_training_overrides preferred_training_settings;
struct dp_audio_test_data audio_test_data;
uint8_t ddc_hw_inst;
uint8_t hpd_src;
uint8_t link_enc_hw_inst;
/* DIG link encoder ID. Used as index in link encoder resource pool.
* For links with fixed mapping to DIG, this is not changed after dc_link
* object creation.
*/
enum engine_id eng_id;
enum engine_id dpia_preferred_eng_id;
bool test_pattern_enabled;
/* Pending/Current test pattern are only used to perform and track
* FIXED_VS retimer test pattern/lane adjustment override state.
* Pending allows link HWSS to differentiate PHY vs non-PHY pattern,
* to perform specific lane adjust overrides before setting certain
* PHY test patterns. In cases when lane adjust and set test pattern
* calls are not performed atomically (i.e. performing link training),
* pending_test_pattern will be invalid or contain a non-PHY test pattern
* and current_test_pattern will contain required context for any future
* set pattern/set lane adjust to transition between override state(s).
*/
enum dp_test_pattern current_test_pattern;
enum dp_test_pattern pending_test_pattern;
union compliance_test_state compliance_test_state;
void *priv;
struct ddc_service *ddc;
enum dp_panel_mode panel_mode;
bool aux_mode;
/* Private to DC core */
const struct dc *dc;
struct dc_context *ctx;
struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
struct graphics_object_id link_id;
/* Endpoint type distinguishes display endpoints which do not have entries
* in the BIOS connector table from those that do. Helps when tracking link
* encoder to display endpoint assignments.
*/
enum display_endpoint_type ep_type;
union ddi_channel_mapping ddi_channel_mapping;
struct connector_device_tag_info device_tag;
struct dpcd_caps dpcd_caps;
uint32_t dongle_max_pix_clk;
unsigned short chip_caps;
unsigned int dpcd_sink_count;
struct hdcp_caps hdcp_caps;
enum edp_revision edp_revision;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
struct psr_settings psr_settings;
struct replay_settings replay_settings;
/* Drive settings read from integrated info table */
struct dc_lane_settings bios_forced_drive_settings;
/* Vendor specific LTTPR workaround variables */
uint8_t vendor_specific_lttpr_link_rate_wa;
bool apply_vendor_specific_lttpr_link_rate_wa;
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
bool dp_skip_reset_segment;
bool dp_skip_fs_144hz;
bool dp_mot_reset_segment;
/* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
bool dpia_mst_dsc_always_on;
/* Forced DPIA into TBT3 compatibility mode. */
bool dpia_forced_tbt3_mode;
bool dongle_mode_timing_override;
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
struct dc_link_status link_status;
struct dprx_states dprx_states;
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
struct dc_panel_config panel_config;
struct phy_state phy_state;
// BW ALLOCATION USB4 ONLY
struct dc_dpia_bw_alloc dpia_bw_alloc_config;
bool skip_implict_edp_power_control;
enum backlight_control_type backlight_control_type;
};
/* Return an enumerated dc_link.
* dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
* Use dc_get_caps() to get number of links.
*/
struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index);
/* Return instance id of the edp link. Inst 0 is primary edp link. */
bool dc_get_edp_link_panel_inst(const struct dc *dc,
const struct dc_link *link,
unsigned int *inst_out);
/* Return an array of link pointers to edp links. */
void dc_get_edp_links(const struct dc *dc,
struct dc_link **edp_links,
int *edp_num);
void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
bool powerOn);
/* The function initiates detection handshake over the given link. It first
* determines if there are display connections over the link. If so it initiates
* detection protocols supported by the connected receiver device. The function
* contains protocol specific handshake sequences which are sometimes mandatory
* to establish a proper connection between TX and RX. So it is always
* recommended to call this function as the first link operation upon HPD event
* or power up event. Upon completion, the function will update link structure
* in place based on latest RX capabilities. The function may also cause dpms
* to be reset to off for all currently enabled streams to the link. It is DM's
* responsibility to serialize detection and DPMS updates.
*
* @reason - Indicate which event triggers this detection. dc may customize
* detection flow depending on the triggering events.
* return false - if detection is not fully completed. This could happen when
* there is an unrecoverable error during detection or detection is partially
* completed (detection has been delegated to the dm mst manager, i.e.
* link->connection_type == dc_connection_mst_branch when returning false).
* return true - detection is completed, link has been fully updated with latest
* detection result.
*/
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason);
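/* Example (sketch): calling detection as the first link operation from a DM
* HPD handler. DETECT_REASON_HPD is assumed here to be a valid
* enum dc_detect_reason value.
*
*	static void example_handle_hpd(struct dc_link *link)
*	{
*		if (!dc_link_detect(link, DETECT_REASON_HPD))
*			return; // detection failed or was delegated (e.g. MST)
*
*		// link->local_sink and link->type now reflect the latest RX caps
*	}
*/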
struct dc_sink_init_data;
/* When link connection type is dc_connection_mst_branch, remote sink can be
* added to the link. The interface creates a remote sink and associates it with
* current link. The sink will be retained by link until remove remote sink is
* called.
*
* @dc_link - link the remote sink will be added to.
* @edid - byte array of EDID raw data.
* @len - size of the edid in bytes
* @init_data - initialization data for the new sink (see struct dc_sink_init_data)
*/
struct dc_sink *dc_link_add_remote_sink(
struct dc_link *dc_link,
const uint8_t *edid,
int len,
struct dc_sink_init_data *init_data);
/* Remove remote sink from a link with dc_connection_mst_branch connection type.
* @link - link the sink should be removed from
* @sink - sink to be removed.
*/
void dc_link_remove_remote_sink(
struct dc_link *link,
struct dc_sink *sink);
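/* Example (sketch): adding and later removing a remote sink on an MST branch
* link. The EDID buffer is assumed to come from the DM's MST manager.
*
*	struct dc_sink_init_data init_data = {
*		.link = link,
*		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
*	};
*	struct dc_sink *sink;
*
*	sink = dc_link_add_remote_sink(link, edid, edid_len, &init_data);
*	if (sink) {
*		// ... drive the remote display ...
*		dc_link_remove_remote_sink(link, sink);
*	}
*/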
/* Enable HPD interrupt handler for a given link */
void dc_link_enable_hpd(const struct dc_link *link);
/* Disable HPD interrupt handler for a given link */
void dc_link_disable_hpd(const struct dc_link *link);
/* determine if there is a sink connected to the link
*
* @type - dc_connection_single if connected, dc_connection_none otherwise.
* return - false if an unexpected error occurs, true otherwise.
*
* NOTE: This function doesn't detect downstream sink connections, i.e.
* dc_connection_mst_branch, dc_connection_sst_branch. In those cases it will
* return dc_connection_single if the branch device is connected, regardless of
* the downstream sink's connection status.
*/
bool dc_link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type);
/* query current hpd pin value
* return - true if HPD is asserted (HPD high), false otherwise (HPD low)
*
*/
bool dc_link_get_hpd_state(struct dc_link *link);
/* Getter for cached link status from given link */
const struct dc_link_status *dc_link_get_status(const struct dc_link *link);
/* enable/disable hardware HPD filter.
*
* @link - The link the HPD pin is associated with.
* @enable = true - enable hardware HPD filter. An HPD event will only be queued
* to the irq handler once no HPD change has been detected for dc's default HPD
* filtering interval since the last HPD event, i.e. if the display keeps
* toggling the hpd pin within the default HPD interval, no HPD event will be
* received until the toggling stops. The HPD event is then queued to the irq
* handler once dc's default HPD filtering interval has elapsed since the last
* HPD event.
*
* @enable = false - disable hardware HPD filter. An HPD event will be queued to
* the irq handler as soon as no HPD change has been detected within the
* IRQ_HPD (aka HPD short pulse) interval (i.e. 2 ms).
*/
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
/* submit i2c read/write payloads through ddc channel
* @link_index - index to a link with ddc in i2c mode
* @cmd - i2c command structure
* return - true if success, false otherwise.
*/
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
struct i2c_command *cmd);
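/* Example (sketch; the i2c_command/i2c_payload field names below are
* assumptions, the ddc type headers are authoritative): writing one register
* of a 7-bit i2c slave behind the link's ddc.
*
*	uint8_t data[2] = { 0x00, 0x42 }; // register offset, value
*	struct i2c_payload payload = {
*		.write = true,
*		.address = 0x37, // hypothetical slave address
*		.length = sizeof(data),
*		.data = data,
*	};
*	struct i2c_command cmd = {
*		.payloads = &payload,
*		.number_of_payloads = 1,
*	};
*
*	if (!dc_submit_i2c(dc, link->link_index, &cmd))
*		; // i2c transaction failed
*/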
/* submit i2c read/write payloads through the oem i2c channel
* @cmd - i2c command structure
* return - true if success, false otherwise.
*/
bool dc_submit_i2c_oem(
struct dc *dc,
struct i2c_command *cmd);
enum aux_return_code_type;
/* Attempt to transfer the given aux payload. This function does not perform
* retries or handle error states. The reply is returned in payload->reply
* and the result through operation_result. Returns the number of bytes
* transferred, or -1 on failure.
*/
int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
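/* Example (sketch; the aux_payload field names are assumptions, the aux/ddc
* type headers are authoritative): a raw, single-shot native AUX read of one
* DPCD byte with no retry policy.
*
*	uint8_t data = 0;
*	uint8_t reply = 0;
*	enum aux_return_code_type result;
*	struct aux_payload payload = {
*		.address = 0x600, // DPCD SET_POWER, for illustration
*		.length = 1,
*		.data = &data,
*		.reply = &reply,
*	};
*
*	if (dc_link_aux_transfer_raw(link->ddc, &payload, &result) < 0)
*		; // transfer failed; the caller decides whether to retry
*/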
bool dc_is_oem_i2c_device_present(
struct dc *dc,
size_t slave_address
);
/* return true if the connected receiver supports the hdcp version */
bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
/* Notify DC about DP RX Interrupt (aka DP IRQ_HPD).
*
* TODO - When defer_handling is true the function will have a different purpose.
* It no longer does complete hpd rx irq handling. We should create a separate
* interface specifically for this case.
*
* Return:
* true - Downstream port status changed. DM should call DC to do the
* detection.
* false - no change in Downstream port status. No further action required
* from DM.
*/
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
bool defer_handling, bool *has_left_work);
/* Handle the test automation sequence defined by the DP specs */
void dc_link_dp_handle_automated_test(struct dc_link *link);
/* handle DP Link loss sequence and try to recover RX link loss with best
* effort
*/
void dc_link_dp_handle_link_loss(struct dc_link *link);
/* Determine if hpd rx irq should be handled or ignored
* return true - hpd rx irq should be handled.
* return false - it is safe to ignore hpd rx irq event
*/
bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
/* Determine if link loss is indicated with a given hpd_irq_dpcd_data.
* @link - link the hpd irq data associated with
* @hpd_irq_dpcd_data - input hpd irq data
* return - true if the hpd irq data indicates link loss
*/
bool dc_link_check_link_loss_status(struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data);
/* Read hpd rx irq data from a given link
* @link - link where the hpd irq data should be read from
* @irq_data - output hpd irq data
* return - DC_OK if the hpd irq data was read successfully, otherwise the
* read has failed.
*/
enum dc_status dc_link_dp_read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data);
/* The function clears recorded DP RX states in the link. DM should call this
* function for previously connected links when resuming from the S3 power state.
*
* TODO - in the future we should consider to expand link resume interface to
* support clearing previous rx states. So we don't have to rely on dm to call
* this interface explicitly.
*/
void dc_link_clear_dprx_states(struct dc_link *link);
/* Destruct the mst topology of the link and reset the allocated payload table
*
* NOTE: this should only be called if DM chooses not to call dc_link_detect but
* still wants to reset MST topology on an unplug event */
bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link);
/* The function calculates effective DP link bandwidth when a given link is
* using the given link settings.
*
* return - total effective link bandwidth in kbps.
*/
uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
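/* Example (sketch): checking whether a timing fits within the effective
* bandwidth of the link's current capability, using only interfaces declared
* in this header.
*
*	static bool example_timing_fits_link(const struct dc_link *link,
*			const struct dc_crtc_timing *timing)
*	{
*		uint32_t req_kbps = dc_bandwidth_in_kbps_from_timing(timing,
*				dc_link_get_highest_encoding_format(link));
*		uint32_t link_kbps = dc_link_bandwidth_kbps(link,
*				dc_link_get_link_cap(link));
*
*		return req_kbps <= link_kbps;
*	}
*/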
/* The function takes a snapshot of current link resource allocation state
* @dc: pointer to dc of the dm calling this
* @map: a dc link resource snapshot defined internally to dc.
*
* DM needs to capture a snapshot of current link resource allocation mapping
* and store it in its persistent storage.
*
* Some link resources are allocated on a first come, first served basis.
* The allocation mapping depends on the original hotplug order, and this
* information is lost the next time the driver is loaded. The snapshot is used
* to restore link resources to their previous state so the user gets consistent
* link capability allocation across reboots.
*
*/
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
/* This function restores link resource allocation state from a snapshot
* @dc: pointer to dc of the dm calling this
* @map: a dc link resource snapshot defined internally to dc.
*
* DM needs to call this function after initial link detection on boot and
* before first commit streams to restore link resource allocation state
* from previous boot session.
*
* Some link resources are allocated on a first come, first served basis.
* The allocation mapping depends on the original hotplug order, and this
* information is lost the next time the driver is loaded. The snapshot is used
* to restore link resources to their previous state so the user gets consistent
* link capability allocation across reboots.
*
*/
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
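/* Example (sketch): the persistence helpers below are hypothetical; where the
* snapshot is actually stored is owned by the DM.
*
*	// before shutdown/suspend:
*	uint32_t map;
*	dc_get_cur_link_res_map(dc, &map);
*	dm_example_store_u32("link_res_map", map); // hypothetical helper
*
*	// on next boot, after initial link detection, before first commit:
*	map = dm_example_load_u32("link_res_map"); // hypothetical helper
*	dc_restore_link_res_map(dc, &map);
*/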
/* TODO: this is not meant to be exposed to DM. Should switch to stream update
* interface i.e stream_update->dsc_config
*/
bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);
/* translate a raw link rate data to bandwidth in kbps */
uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(const struct dc *dc, uint8_t bw);
/* Determine the optimal link settings for a given link and required bandwidth.
* @link - currently detected link
* @req_bw - requested bandwidth in kbps
* @link_settings - returns the most optimal link settings that can fit the
* requested bandwidth
* return - false if the link can't support the requested bandwidth, true if
* suitable link settings are found.
*/
bool dc_link_decide_edp_link_settings(struct dc_link *link,
struct dc_link_settings *link_settings,
uint32_t req_bw);
/* Return the max dp link settings that can be driven by the link without
* considering the connected RX device and its capability
*/
bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link,
struct dc_link_settings *max_link_enc_cap);
/* Determine which DP link channel coding format will be used when the link is
* driving MST mode. The decision will remain unchanged until the next HPD event.
*
* @link - a link with DP RX connection
* return - the channel coding format dc will choose if a stream is committed to
* this link with MST signal type.
*/
enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(
const struct dc_link *link);
/* Get the max dp link settings the link can enable with all things considered
* (i.e. TX/RX/cable capabilities and dp override policies).
*
* @link - a link with DP RX connection
* return - max dp link settings the link can enable.
*
*/
const struct dc_link_settings *dc_link_get_link_cap(const struct dc_link *link);
/* Get the highest encoding format that the link supports; highest meaning the
* encoding format which supports the maximum bandwidth.
*
* @link - a link with DP RX connection
* return - highest encoding format link supports.
*/
enum dc_link_encoding_format dc_link_get_highest_encoding_format(const struct dc_link *link);
/* Check if a RX (ex. DP sink, MST hub, passive or active dongle) is connected
* to a link with dp connector signal type.
* @link - a link with dp connector signal type
* return - true if connected, false otherwise
*/
bool dc_link_is_dp_sink_present(struct dc_link *link);
/* Force DP lane settings update to main-link video signal and notify the change
* to DP RX via DPCD. This is a debug interface used for video signal integrity
* tuning purposes. The interface assumes the link has already been enabled with
* a DP signal.
*
* @lt_settings - a container structure with desired hw_lane_settings
*/
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
struct dc_link *link);
/* Enable a test pattern in Link or PHY layer in an active link for compliance
* test or debugging purposes. The test pattern will remain until the next unplug.
*
* @link - active link with DP signal output enabled.
* @test_pattern - desired test pattern to output.
* NOTE: set to DP_TEST_PATTERN_VIDEO_MODE to disable previous test pattern.
* @test_pattern_color_space - for video test pattern choose a desired color
* space.
* @p_link_settings - For PHY pattern choose a desired link settings
* @p_custom_pattern - some test patterns require custom input to customize
* certain pattern details. Otherwise keep it NULL.
* @cust_pattern_size - size of the custom pattern input.
*
*/
bool dc_link_dp_set_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
enum dp_test_pattern_color_space test_pattern_color_space,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
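/* Example (sketch): clearing any previously programmed test pattern on an
* active DP link. DP_TEST_PATTERN_VIDEO_MODE is the value named in the comment
* above; the color space value passed here is an assumption and is irrelevant
* when restoring video mode.
*
*	dc_link_dp_set_test_pattern(link,
*			DP_TEST_PATTERN_VIDEO_MODE,
*			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
*			NULL, NULL, 0);
*/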
/* Force a specific link to always use the given DP link settings until reboot.
* If the link has already been enabled, the interface will also switch to the
* desired link settings immediately. This is a debug interface for generic dp
* issue troubleshooting.
*/
void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_link_settings *link_setting,
struct dc_link *link);
/* Force the DP link to customize a specific link training behavior by
* overriding the protocol defined in the standard DP specs. This is a debug
* interface to troubleshoot display specific link training issues or to apply
* display specific workarounds in link training.
*
* @link_settings - if not NULL, force preferred link settings to the link.
* @lt_overrides - a set of override pointers. If any pointer is non-NULL, dc
* will apply that particular override in future link training. If NULL is
* passed in, dc resets previous overrides.
* NOTE: DM must keep the memory behind the override pointers valid until DM
* resets the preferred training settings.
*/
void dc_link_set_preferred_training_settings(struct dc *dc,
struct dc_link_settings *link_setting,
struct dc_link_training_overrides *lt_overrides,
struct dc_link *link,
bool skip_immediate_retrain);
/* return - true if FEC is supported with connected DP RX, false otherwise */
bool dc_link_is_fec_supported(const struct dc_link *link);
/* query FEC enablement policy to determine if FEC will be enabled by dc during
* link enablement.
* return - true if FEC should be enabled, false otherwise.
*/
bool dc_link_should_enable_fec(const struct dc_link *link);
/* Determine the lttpr mode the current link should be enabled with for a
* specific link setting.
*/
enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
struct dc_link_settings *link_setting);
/* Force DP RX to update its power state.
* NOTE: this interface doesn't update the dp main-link. Calling this function
* will cause the DP TX main-link and DP RX power states to go out of sync. DM
* has to restore the RX power state after finishing the DM specific execution
* that required DP RX to be in a specific power state.
* @on - true to set DP RX in D0 power state, false to set DP RX in D3 power
* state.
*/
void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
/* Force the link to read base dp receiver caps from dpcd 000h - 00Fh and
* overwrite the current values read from the extended receiver caps at
* 02200h - 0220Fh. Some DP RX have problems providing accurate DP receiver caps
* in the extended field; this interface is a workaround to revert the link back
* to using the base caps.
*/
void dc_link_overwrite_extended_receiver_cap(
struct dc_link *link);
void dc_link_edp_panel_backlight_power_on(struct dc_link *link,
bool wait_for_hpd);
/* Set backlight level of an embedded panel (eDP, LVDS).
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
* and 16 bit fractional, where 1.0 is max backlight value.
*/
bool dc_link_set_backlight_level(const struct dc_link *dc_link,
struct set_backlight_level_params *backlight_level_params);
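/* Example (sketch): converting a 0-100 percent brightness into the u16.16
* fixed point format described above. The backlight_pwm_u16_16 field name is
* an assumption; check struct set_backlight_level_params for the real layout.
*
*	struct set_backlight_level_params params = { 0 };
*	unsigned int pct = 75;
*
*	// 100 percent maps to 1.0 in u16.16, i.e. 0x00010000
*	params.backlight_pwm_u16_16 = (pct * 0x10000) / 100; // assumed field
*	dc_link_set_backlight_level(link, &params);
*/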
/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
bool dc_link_set_backlight_level_nits(struct dc_link *link,
bool isHDR,
uint32_t backlight_millinits,
uint32_t transition_time_in_ms);
bool dc_link_get_backlight_level_nits(struct dc_link *link,
uint32_t *backlight_millinits,
uint32_t *backlight_millinits_peak);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
int dc_link_get_target_backlight_pwm(const struct dc_link *link);
bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
bool wait, bool force_static, const unsigned int *power_opts);
bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
bool dc_link_setup_psr(struct dc_link *dc_link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context);
/*
* Communicate with DMUB to allow or disallow Panel Replay on the specified link:
*
* @link: pointer to the dc_link struct instance
* @enable: enable (active) or disable (inactive) replay
* @wait: wait for the transition to the active state to complete.
* @force_static: force disable (inactive) replay
* @power_opts: power optimization parameters to pass to DMUB.
*
* return: true if allowing Replay active succeeded, false otherwise.
*/
bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
bool wait, bool force_static, const unsigned int *power_opts);
bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
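/* Example (sketch): allowing Replay to become active without waiting for the
* transition. Treating power_opts == 0 as "no extra power optimizations" is an
* assumption here.
*
*	const bool allow = true;
*	const unsigned int power_opts = 0; // assumption: no extra power opts
*
*	dc_link_set_replay_allow_active(link, &allow, false, false, &power_opts);
*/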
/* On eDP links this function call will stall until T12 has elapsed.
* If the panel is not in power off state, this function will return
* immediately.
*/
bool dc_link_wait_for_t12(struct dc_link *link);
/* Determine if dp trace has been initialized to reflect up-to-date results.
* return - true if the trace is initialized and has valid data, false if the dp
* trace doesn't have a valid result.
*/
bool dc_dp_trace_is_initialized(struct dc_link *link);
/* Query a dp trace flag to indicate if the current dp trace data has been
* logged before
*/
bool dc_dp_trace_is_logged(struct dc_link *link,
bool in_detection);
/* Set dp trace flag to indicate whether DM has already logged the current dp
* trace data. DM can set is_logged to true upon logging and check
* dc_dp_trace_is_logged before logging to avoid logging the same result twice.
*/
void dc_dp_trace_set_is_logged_flag(struct dc_link *link,
bool in_detection,
bool is_logged);
/* Obtain driver time stamp for last dp link training end. The time stamp is
* formatted based on dm_get_timestamp DM function.
* @in_detection - true to get link training end time stamp of last link
* training in detection sequence. false to get link training end time stamp
* of last link training in commit (dpms) sequence
*/
unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
bool in_detection);
/* Get how many link training attempts dc has done with latest sequence.
* @in_detection - true to get link training count of last link
* training in detection sequence. false to get link training count of last link
* training in commit (dpms) sequence
*/
const struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
/* Get how many link losses have happened since the last link training attempt */
unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
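/* Example (sketch): logging the latest detection-time link training result
* exactly once, using the is_logged flag described above.
*
*	if (dc_dp_trace_is_initialized(link) &&
*	    !dc_dp_trace_is_logged(link, true)) {
*		unsigned long long ts =
*				dc_dp_trace_get_lt_end_timestamp(link, true);
*		const struct dp_trace_lt_counts *counts =
*				dc_dp_trace_get_lt_counts(link, true);
*
*		// ... DM specific logging of ts/counts goes here ...
*		dc_dp_trace_set_is_logged_flag(link, true, true);
*	}
*/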
/*
* USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS
*/
/*
* Send a request from DP-Tx requesting to allocate BW remotely after
* allocating it locally. This will get processed by CM and a CB function
* will be called.
*
* @link: pointer to the dc_link struct instance
* @req_bw: the requested bw in Kbyte to be allocated
*
* return: none
*/
void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
/*
* Handler function for when the status of the request above is complete.
* We will find out the result of the allocation on CM and update structs.
*
* @link: pointer to the dc_link struct instance
* @bw: Allocated or Estimated BW depending on the result
* @result: Response type
*
* return: none
*/
void dc_link_handle_usb4_bw_alloc_response(struct dc_link *link,
uint8_t bw, uint8_t result);
/*
* Handle the USB4 BW Allocation related functionality here:
* Plug => Try to allocate max bw from timing parameters supported by the sink
* Unplug => de-allocate bw
*
* @link: pointer to the dc_link struct instance
* @peak_bw: Peak bw used by the link/sink
*
* return: allocated bw else return 0
*/
int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw);
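/* Example (sketch): requesting bandwidth for a DPIA link on plug, sizing the
* request from the timing the DM intends to drive. Expressing peak_bw in kbps
* via dc_bandwidth_in_kbps_from_timing is an assumption about the expected
* units.
*
*	int peak_bw = dc_bandwidth_in_kbps_from_timing(timing,
*			dc_link_get_highest_encoding_format(link));
*	int granted = dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*			link, peak_bw);
*
*	if (granted == 0)
*		; // allocation failed; DM may have to fall back to a lower mode
*/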
/*
* Validate the BW of all the valid DPIA links to make sure it doesn't exceed
* available BW for each host router
*
* @dc: pointer to dc struct
* @streams: pointer to all possible streams
* @count: number of valid DPIA streams
*
* return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
*/
bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
const unsigned int count);
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
// 128bit GUID in binary form
unsigned char guid[16];
// 8 byte port ID -> ELD.PortID
unsigned int portId[2];
// 2 byte manufacturer name -> ELD.ManufacturerName
unsigned short manufacturerName;
// 2 byte product code -> ELD.ProductCode
unsigned short productCode;
};
struct dc_sink_dsc_caps {
// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
// 'false' if they are sink's DSC caps
bool is_virtual_dpcd_dsc;
// 'true' if MST topology supports DSC passthrough for sink
// 'false' if MST topology does not support DSC passthrough
bool is_dsc_passthrough_supported;
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
struct dc_sink_fec_caps {
bool is_rx_fec_supported;
bool is_topology_fec_supported;
};
struct scdc_caps {
union hdmi_scdc_manufacturer_OUI_data manufacturer_OUI;
union hdmi_scdc_device_id_data device_id;
};
/*
* The sink structure contains EDID and other display device properties
*/
struct dc_sink {
enum signal_type sink_signal;
struct dc_edid dc_edid; /* raw edid */
struct dc_edid_caps edid_caps; /* parsed display caps */
struct dc_container_id *dc_container_id;
uint32_t dongle_max_pix_clk;
void *priv;
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
struct scdc_caps scdc_caps;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
bool is_vsc_sdp_colorimetry_supported;
/* private to DC core */
struct dc_link *link;
struct dc_context *ctx;
uint32_t sink_id;
/* private to dc_sink.c */
// refcount must be the last member in dc_sink, since we want the
// sink structure to be logically cloneable up to (but not including)
// refcount
struct kref refcount;
};
void dc_sink_retain(struct dc_sink *sink);
void dc_sink_release(struct dc_sink *sink);
struct dc_sink_init_data {
enum signal_type sink_signal;
struct dc_link *link;
uint32_t dongle_max_pix_clk;
bool converter_disable_audio;
};
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
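/* Example (sketch): creating a sink for a directly attached DP display and
* dropping the references when done, following the kref semantics noted in
* struct dc_sink above.
*
*	struct dc_sink_init_data init_data = {
*		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT,
*		.link = link,
*	};
*	struct dc_sink *sink = dc_sink_create(&init_data);
*
*	if (sink) {
*		dc_sink_retain(sink);  // extra reference for another holder
*		// ...
*		dc_sink_release(sink); // drop the extra reference
*		dc_sink_release(sink); // drop the creation reference
*	}
*/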
/* Newer interfaces */
struct dc_cursor {
struct dc_plane_address address;
struct dc_cursor_attributes attributes;
};
/* Interrupt interfaces */
enum dc_irq_source dc_interrupt_to_irq_source(
struct dc *dc,
uint32_t src_id,
uint32_t ext_id);
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
enum dc_irq_source dc_get_hpd_irq_source_at_index(
struct dc *dc, uint32_t link_index);
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable);
/* Power Interfaces */
void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state);
void dc_resume(struct dc *dc);
void dc_power_down_on_boot(struct dc *dc);
/*
* HDCP Interfaces
*/
enum hdcp_message_status dc_process_hdcp_msg(
enum signal_type signal,
struct dc_link *link,
struct hdcp_protection_message *message_info);
bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
unsigned int pitch,
unsigned int height,
enum surface_pixel_format format,
struct dc_cursor_attributes *cursor_attr);
#define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__)
#define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__)
void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, const char *caller_name);
void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name);
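/* Example (sketch): a DM path about to touch display hardware registers
* directly should first make sure DC has exited IPS and has idle optimizations
* disabled, then re-allow them when finished.
*
*	dc_exit_ips_for_hw_access(dc);
*	dc_allow_idle_optimizations(dc, false);
*	// ... direct hardware access ...
*	dc_allow_idle_optimizations(dc, true);
*/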
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc);
/* set min memory clock to the min required for current mode, max to maxDPM */
void dc_lock_memory_clock_frequency(struct dc *dc);
/* set soft max for memclk, to be used for AC/DC switching clock limitations */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable);
/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc);
/* disables fw based mclk switch */
void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
bool dc_set_replay_allow_active(struct dc *dc, bool active);
bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips);
void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
bool dc_is_dmub_outbox_supported(struct dc *dc);
bool dc_enable_dmub_notifications(struct dc *dc);
bool dc_abm_save_restore(
struct dc *dc,
struct dc_stream_state *stream,
struct abm_save_restore *pData);
void dc_enable_dmub_outbox(struct dc *dc);
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
uint32_t link_index,
struct aux_payload *payload);
/* Get dc link index from dpia port index */
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index);
bool dc_process_dmub_set_config_async(struct dc *dc,
uint32_t link_index,
struct set_config_cmd_payload *payload,
struct dmub_notification *notify);
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint32_t link_index,
uint8_t mst_alloc_slots,
uint8_t *mst_slots_in_use);
void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps);
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable);
void dc_print_dmub_diagnostic_data(const struct dc *dc);
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties);
struct dc_power_profile {
int power_level; /* Lower is better */
};
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context);
/* DSC Interfaces */
#include "dc_dsc.h"
/* Disable accelerated mode interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
#endif /* DC_INTERFACE_H_ */