// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Intel SCU IPC mechanism
*
* (C) Copyright 2008-2010,2015 Intel Corporation
* Author: Sreedhara DS (sreedhara.ds@intel.com)
*
 * The SCU runs on an ARC processor and communicates with the IA core
 * through an IPC mechanism that passes messages between the two. The SCU
 * has two IPC channels: IPC-1 is used between the IA32 core and the SCU,
 * while IPC-2 is used between the P-Unit and the SCU. This driver deals
 * with IPC-1 and provides an API for accessing power control unit
 * registers (e.g. MSIC) along with other APIs.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W 0 /* Register write */
#define IPC_CMD_PCNTRL_R 1 /* Register read */
#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
/*
* IPC register summary
*
 * IPC register blocks are memory mapped at a fixed address in PCI BAR 0.
 * To read from or write to the SCU, the driver writes to the IPC-1 memory
 * mapped registers. The IPC mechanism works as follows:
 *
 * 1. The IA core cDMI interface claims this transaction and converts it to a
 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 *
 * 2. The South Complex cDMI block receives this message and writes it to
 *    the IPC-1 register block, causing an interrupt to the SCU.
 *
 * 3. The SCU firmware decodes this interrupt and IPC message and calls the
 *    appropriate message handler within the firmware.
*/
#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
struct intel_scu_ipc_dev {
struct device dev;
struct resource mem;
struct module *owner;
int irq;
void __iomem *ipc_base;
struct completion cmd_complete;
};
#define IPC_STATUS 0x04
#define IPC_STATUS_IRQ BIT(2)
#define IPC_STATUS_ERR BIT(1)
#define IPC_STATUS_BUSY BIT(0)
/*
* IPC Write/Read Buffers:
* 16 byte buffer for sending and receiving data to and from SCU.
*/
#define IPC_WRITE_BUFFER 0x80
#define IPC_READ_BUFFER 0x90
/* Timeout in jiffies */
#define IPC_TIMEOUT (10 * HZ)
static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to SCU */
static struct class intel_scu_ipc_class = {
.name = "intel_scu_ipc",
};
/**
* intel_scu_ipc_dev_get() - Get SCU IPC instance
*
 * The recommended new API takes the SCU IPC instance as a parameter, and
 * this function can be called by a driver to get that instance. It also
 * makes sure the driver providing the IPC functionality cannot be
 * unloaded while the caller holds the instance.
*
* Call intel_scu_ipc_dev_put() to release the instance.
*
* Returns %NULL if SCU IPC is not currently available.
*/
struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
{
struct intel_scu_ipc_dev *scu = NULL;
mutex_lock(&ipclock);
if (ipcdev) {
get_device(&ipcdev->dev);
/*
* Prevent the IPC provider from being unloaded while it
* is being used.
*/
if (!try_module_get(ipcdev->owner))
put_device(&ipcdev->dev);
else
scu = ipcdev;
}
mutex_unlock(&ipclock);
return scu;
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
/**
* intel_scu_ipc_dev_put() - Put SCU IPC instance
* @scu: SCU IPC instance
*
* This function releases the SCU IPC instance retrieved from
* intel_scu_ipc_dev_get() and allows the driver providing IPC to be
* unloaded.
*/
void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
{
if (scu) {
module_put(scu->owner);
put_device(&scu->dev);
}
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
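/*
 * Usage sketch (illustrative only, not part of this driver): a consumer
 * typically brackets its SCU accesses with a get/put pair, deferring its
 * probe if the IPC provider is not available yet.
 *
 *	struct intel_scu_ipc_dev *scu;
 *
 *	scu = intel_scu_ipc_dev_get();
 *	if (!scu)
 *		return -EPROBE_DEFER;
 *
 *	... intel_scu_ipc_dev_*() calls ...
 *
 *	intel_scu_ipc_dev_put(scu);
 */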
struct intel_scu_ipc_devres {
struct intel_scu_ipc_dev *scu;
};
static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
{
struct intel_scu_ipc_devres *dr = res;
struct intel_scu_ipc_dev *scu = dr->scu;
intel_scu_ipc_dev_put(scu);
}
/**
* devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
* @dev: Device requesting the SCU IPC device
*
 * The recommended new API takes the SCU IPC instance as a parameter, and
 * this function can be called by a driver to get that instance. It also
 * makes sure the driver providing the IPC functionality cannot be
 * unloaded while the caller holds the instance. The instance is released
 * automatically when @dev is unbound.
*
* Returns %NULL if SCU IPC is not currently available.
*/
struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
{
struct intel_scu_ipc_devres *dr;
struct intel_scu_ipc_dev *scu;
dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return NULL;
scu = intel_scu_ipc_dev_get();
if (!scu) {
devres_free(dr);
return NULL;
}
dr->scu = scu;
devres_add(dev, dr);
return scu;
}
EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
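/*
 * Usage sketch (illustrative only; the probe function below is
 * hypothetical): with the managed variant the reference is dropped
 * automatically when @dev is unbound, so no explicit
 * intel_scu_ipc_dev_put() is needed.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct intel_scu_ipc_dev *scu;
 *
 *		scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
 *		if (!scu)
 *			return -EPROBE_DEFER;
 *		...
 *	}
 */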
/*
* Send ipc command
* Command Register (Write Only):
* A write to this register results in an interrupt to the SCU core processor
* Format:
* |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
*/
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
reinit_completion(&scu->cmd_complete);
writel(cmd | IPC_IOC, scu->ipc_base);
}
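/*
 * For example, pwr_reg_rdwr() below issues a single PMIC register read as
 *
 *	(1 * 2) << 16 | IPC_CMD_PCNTRL_R << 12 | IPCMSG_PCNTRL
 *
 * i.e. size = 2 bytes of address data, command id = register read,
 * command = power controller access. ipc_command() then ORs in IPC_IOC so
 * that completion of the command raises an interrupt when one is available.
 */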
/*
* Write ipc data
* IPC Write Buffer (Write Only):
* 16-byte buffer for sending data associated with IPC command to
* SCU. Size of the data is specified in the IPC_COMMAND_REG register
*/
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
/*
* Status Register (Read Only):
* Driver will read this register to get the ready/busy status of the IPC
* block and error status of the IPC command that was just processed by SCU
* Format:
* |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
*/
static inline u32 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
return __raw_readl(scu->ipc_base + IPC_STATUS);
}
/* Read ipc byte data */
static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
{
return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
}
/* Read ipc u32 data */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}
/* Wait until the SCU is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
unsigned long end = jiffies + IPC_TIMEOUT;
do {
u32 status;
status = ipc_read_status(scu);
if (!(status & IPC_STATUS_BUSY))
return (status & IPC_STATUS_ERR) ? -EIO : 0;
usleep_range(50, 100);
} while (time_before(jiffies, end));
return -ETIMEDOUT;
}
/* Wait until the IPC IOC interrupt is received or the timeout expires */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
return -ETIMEDOUT;
status = ipc_read_status(scu);
if (status & IPC_STATUS_ERR)
return -EIO;
return 0;
}
static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
{
return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}
/* Read/Write power control (PMIC in Langwell, MSIC in Penwell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
u32 count, u32 op, u32 id)
{
int nc;
u32 offset = 0;
int err;
u8 cbuf[IPC_WWBUF_SIZE];
u32 *wbuf = (u32 *)&cbuf;
memset(cbuf, 0, sizeof(cbuf));
mutex_lock(&ipclock);
if (!scu)
scu = ipcdev;
if (!scu) {
mutex_unlock(&ipclock);
return -ENODEV;
}
for (nc = 0; nc < count; nc++, offset += 2) {
cbuf[offset] = addr[nc];
cbuf[offset + 1] = addr[nc] >> 8;
}
if (id == IPC_CMD_PCNTRL_R) {
for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
ipc_data_writel(scu, wbuf[nc], offset);
ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
} else if (id == IPC_CMD_PCNTRL_W) {
for (nc = 0; nc < count; nc++, offset += 1)
cbuf[offset] = data[nc];
for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
ipc_data_writel(scu, wbuf[nc], offset);
ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
} else if (id == IPC_CMD_PCNTRL_M) {
cbuf[offset] = data[0];
cbuf[offset + 1] = data[1];
ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
}
err = intel_scu_ipc_check_status(scu);
if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
/* Workaround: values are read as 0 without memcpy_fromio */
		memcpy_fromio(cbuf, scu->ipc_base + IPC_READ_BUFFER, 16);
for (nc = 0; nc < count; nc++)
data[nc] = ipc_data_readb(scu, nc);
}
mutex_unlock(&ipclock);
return err;
}
/**
* intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
* @scu: Optional SCU IPC instance
* @addr: Register on SCU
* @data: Return pointer for read byte
*
* Read a single register. Returns %0 on success or an error code. All
* locking between SCU accesses is handled for the caller.
*
* This function may sleep.
*/
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
/**
* intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
* @scu: Optional SCU IPC instance
* @addr: Register on SCU
* @data: Byte to write
*
* Write a single register. Returns %0 on success or an error code. All
* locking between SCU accesses is handled for the caller.
*
* This function may sleep.
*/
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
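/*
 * Usage sketch (illustrative only; register address 0x30 is made up, real
 * addresses depend on the PMIC/MSIC in question). @scu may also be %NULL
 * to use the single registered instance:
 *
 *	u8 val;
 *	int ret;
 *
 *	ret = intel_scu_ipc_dev_ioread8(scu, 0x30, &val);
 *	if (!ret)
 *		ret = intel_scu_ipc_dev_iowrite8(scu, 0x30, val | BIT(0));
 */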
/**
* intel_scu_ipc_dev_readv() - Read a set of registers
* @scu: Optional SCU IPC instance
* @addr: Register list
* @data: Bytes to return
* @len: Length of array
*
* Read registers. Returns %0 on success or an error code. All locking
* between SCU accesses is handled for the caller.
*
* The largest array length permitted by the hardware is 5 items.
*
* This function may sleep.
*/
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
size_t len)
{
return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
/**
* intel_scu_ipc_dev_writev() - Write a set of registers
* @scu: Optional SCU IPC instance
* @addr: Register list
* @data: Bytes to write
* @len: Length of array
*
* Write registers. Returns %0 on success or an error code. All locking
* between SCU accesses is handled for the caller.
*
* The largest array length permitted by the hardware is 5 items.
*
* This function may sleep.
*/
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
size_t len)
{
return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
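/*
 * Usage sketch (illustrative only; the addresses are made up). At most
 * five registers may be accessed per call:
 *
 *	u16 addrs[] = { 0x30, 0x31, 0x32 };
 *	u8 vals[ARRAY_SIZE(addrs)];
 *	int ret;
 *
 *	ret = intel_scu_ipc_dev_readv(scu, addrs, vals, ARRAY_SIZE(addrs));
 */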
/**
* intel_scu_ipc_dev_update() - Update a register
* @scu: Optional SCU IPC instance
* @addr: Register address
* @data: Bits to update
* @mask: Mask of bits to update
*
 * Read-modify-write a power control unit register. @data holds the new
 * bit values and @mask is a bitmap selecting which bits to update: a %0
 * bit leaves the corresponding register bit unmodified, a %1 bit updates
 * it from @data. Returns %0 on success or an error code.
*
* This function may sleep. Locking between SCU accesses is handled
* for the caller.
*/
int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
u8 mask)
{
u8 tmp[2] = { data, mask };
return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_update);
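/*
 * Usage sketch (illustrative only; register address and bit are made up):
 * set bit 0 of register 0x30 while leaving the other bits untouched:
 *
 *	ret = intel_scu_ipc_dev_update(scu, 0x30, BIT(0), BIT(0));
 */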
/**
* intel_scu_ipc_dev_simple_command() - Send a simple command
* @scu: Optional SCU IPC instance
* @cmd: Command
* @sub: Sub type
*
 * Issue a simple command to the SCU. Do not use this interface if you then
 * need to read data back, as the read/write buffers may be overwritten by
 * another SCU access by the time this function returns.
*
* This function may sleep. Locking for SCU accesses is handled for the
* caller.
*/
int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
int sub)
{
u32 cmdval;
int err;
mutex_lock(&ipclock);
if (!scu)
scu = ipcdev;
if (!scu) {
mutex_unlock(&ipclock);
return -ENODEV;
}
cmdval = sub << 12 | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
mutex_unlock(&ipclock);
if (err)
dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
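/*
 * Usage sketch (illustrative only; the command and sub-command values are
 * hypothetical and firmware specific). The command word sent to the SCU is
 * sub << 12 | cmd, with no data in the read/write buffers:
 *
 *	ret = intel_scu_ipc_dev_simple_command(scu, 0xA0, 0x01);
 */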
/**
* intel_scu_ipc_dev_command_with_size() - Command with data
* @scu: Optional SCU IPC instance
* @cmd: Command
* @sub: Sub type
* @in: Input data
* @inlen: Input length in bytes
* @size: Input size written to the IPC command register in whatever
* units (dword, byte) the particular firmware requires. Normally
* should be the same as @inlen.
* @out: Output data
* @outlen: Output length in bytes
*
 * Issue a command to the SCU which involves data transfers. The data
 * copies are done under the lock, but interpreting the data is left to
 * the caller. Returns %0 on success or an error code.
*/
int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
int sub, const void *in, size_t inlen,
size_t size, void *out, size_t outlen)
{
size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
u32 cmdval, inbuf[4] = {};
int i, err;
if (inbuflen > 4 || outbuflen > 4)
return -EINVAL;
mutex_lock(&ipclock);
if (!scu)
scu = ipcdev;
if (!scu) {
mutex_unlock(&ipclock);
return -ENODEV;
}
memcpy(inbuf, in, inlen);
for (i = 0; i < inbuflen; i++)
ipc_data_writel(scu, inbuf[i], 4 * i);
cmdval = (size << 16) | (sub << 12) | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
if (!err) {
u32 outbuf[4] = {};
for (i = 0; i < outbuflen; i++)
outbuf[i] = ipc_data_readl(scu, 4 * i);
memcpy(out, outbuf, outlen);
}
mutex_unlock(&ipclock);
if (err)
dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
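/*
 * Usage sketch (illustrative only; command, sub-command and payload are
 * hypothetical and firmware specific). Up to 16 bytes may be passed in
 * each direction:
 *
 *	u32 in = 0x12345678;
 *	u32 out;
 *	int ret;
 *
 *	ret = intel_scu_ipc_dev_command_with_size(scu, 0xA0, 0x01,
 *						  &in, sizeof(in), sizeof(in),
 *						  &out, sizeof(out));
 */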
/*
 * The interrupt handler is called when the IOC bit of the IPC command
 * register is set. In that case the caller waits on the completion that
 * this handler signals instead of polling the status register.
 *
 * The handler acknowledges the interrupt by writing the IRQ status bit
 * back to the status register.
*/
static irqreturn_t ioc(int irq, void *dev_id)
{
struct intel_scu_ipc_dev *scu = dev_id;
	u32 status = ipc_read_status(scu);
writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
complete(&scu->cmd_complete);
return IRQ_HANDLED;
}
static void intel_scu_ipc_release(struct device *dev)
{
struct intel_scu_ipc_dev *scu;
scu = container_of(dev, struct intel_scu_ipc_dev, dev);
if (scu->irq > 0)
free_irq(scu->irq, scu);
iounmap(scu->ipc_base);
release_mem_region(scu->mem.start, resource_size(&scu->mem));
kfree(scu);
}
/**
* __intel_scu_ipc_register() - Register SCU IPC device
* @parent: Parent device
* @scu_data: Data used to configure SCU IPC
* @owner: Module registering the SCU IPC device
*
* Call this function to register SCU IPC mechanism under @parent.
* Returns pointer to the new SCU IPC device or ERR_PTR() in case of
* failure. The caller may use the returned instance if it needs to do
* SCU IPC calls itself.
*/
struct intel_scu_ipc_dev *
__intel_scu_ipc_register(struct device *parent,
const struct intel_scu_ipc_data *scu_data,
struct module *owner)
{
int err;
struct intel_scu_ipc_dev *scu;
void __iomem *ipc_base;
mutex_lock(&ipclock);
/* We support only one IPC */
if (ipcdev) {
err = -EBUSY;
goto err_unlock;
}
scu = kzalloc(sizeof(*scu), GFP_KERNEL);
if (!scu) {
err = -ENOMEM;
goto err_unlock;
}
scu->owner = owner;
scu->dev.parent = parent;
scu->dev.class = &intel_scu_ipc_class;
scu->dev.release = intel_scu_ipc_release;
if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
"intel_scu_ipc")) {
err = -EBUSY;
goto err_free;
}
ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
if (!ipc_base) {
err = -ENOMEM;
goto err_release;
}
scu->ipc_base = ipc_base;
scu->mem = scu_data->mem;
scu->irq = scu_data->irq;
init_completion(&scu->cmd_complete);
if (scu->irq > 0) {
err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
if (err)
goto err_unmap;
}
/*
* After this point intel_scu_ipc_release() takes care of
* releasing the SCU IPC resources once refcount drops to zero.
*/
dev_set_name(&scu->dev, "intel_scu_ipc");
err = device_register(&scu->dev);
if (err) {
put_device(&scu->dev);
goto err_unlock;
}
/* Assign device at last */
ipcdev = scu;
mutex_unlock(&ipclock);
return scu;
err_unmap:
iounmap(ipc_base);
err_release:
release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
err_free:
kfree(scu);
err_unlock:
mutex_unlock(&ipclock);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
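/*
 * Usage sketch (illustrative only): the MMIO resource and interrupt number
 * come from whatever bus driver discovers the SCU IPC block; passing an
 * irq of 0 makes the driver fall back to polling.
 *
 *	struct intel_scu_ipc_data scu_data = {};
 *	struct intel_scu_ipc_dev *scu;
 *
 *	scu_data.mem = *mmio_res;
 *	scu_data.irq = irq;
 *
 *	scu = __intel_scu_ipc_register(parent, &scu_data, THIS_MODULE);
 *	if (IS_ERR(scu))
 *		return PTR_ERR(scu);
 */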
/**
* intel_scu_ipc_unregister() - Unregister SCU IPC
* @scu: SCU IPC handle
*
* This unregisters the SCU IPC device and releases the acquired
* resources once the refcount goes to zero.
*/
void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
{
mutex_lock(&ipclock);
if (!WARN_ON(!ipcdev)) {
ipcdev = NULL;
device_unregister(&scu->dev);
}
mutex_unlock(&ipclock);
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
{
struct intel_scu_ipc_devres *dr = res;
struct intel_scu_ipc_dev *scu = dr->scu;
intel_scu_ipc_unregister(scu);
}
/**
* __devm_intel_scu_ipc_register() - Register managed SCU IPC device
* @parent: Parent device
* @scu_data: Data used to configure SCU IPC
* @owner: Module registering the SCU IPC device
*
* Call this function to register managed SCU IPC mechanism under
* @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
* case of failure. The caller may use the returned instance if it needs
* to do SCU IPC calls itself.
*/
struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register(struct device *parent,
const struct intel_scu_ipc_data *scu_data,
struct module *owner)
{
struct intel_scu_ipc_devres *dr;
struct intel_scu_ipc_dev *scu;
dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
if (!dr)
return NULL;
scu = __intel_scu_ipc_register(parent, scu_data, owner);
if (IS_ERR(scu)) {
devres_free(dr);
return scu;
}
dr->scu = scu;
devres_add(parent, dr);
return scu;
}
EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
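/*
 * Usage sketch (illustrative only): the managed variant ties the SCU IPC
 * lifetime to @parent, so no explicit intel_scu_ipc_unregister() call is
 * needed:
 *
 *	scu = __devm_intel_scu_ipc_register(parent, &scu_data, THIS_MODULE);
 *	if (IS_ERR(scu))
 *		return PTR_ERR(scu);
 */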
static int __init intel_scu_ipc_init(void)
{
return class_register(&intel_scu_ipc_class);
}
subsys_initcall(intel_scu_ipc_init);
static void __exit intel_scu_ipc_exit(void)
{
class_unregister(&intel_scu_ipc_class);
}
module_exit(intel_scu_ipc_exit);