summaryrefslogtreecommitdiff
path: root/include/linux/kmsan.h
blob: e00de976ee4387245f01eb36bcb01a3bb4d8734f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KMSAN API for subsystems.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */
#ifndef _LINUX_KMSAN_H
#define _LINUX_KMSAN_H

#include <linux/gfp.h>
#include <linux/kmsan-checks.h>
#include <linux/types.h>

struct page;
struct kmem_cache;
struct task_struct;

#ifdef CONFIG_KMSAN

/**
 * kmsan_task_create() - Initialize KMSAN state for the task.
 * @task: task to initialize.
 */
void kmsan_task_create(struct task_struct *task);

/**
 * kmsan_task_exit() - Notify KMSAN that a task has exited.
 * @task: task about to finish.
 */
void kmsan_task_exit(struct task_struct *task);

/**
 * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
 *
 * Allocate and initialize KMSAN metadata for early allocations.
 */
void __init kmsan_init_shadow(void);

/**
 * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
 */
void __init kmsan_init_runtime(void);

/**
 * kmsan_memblock_free_pages() - handle freeing of memblock pages.
 * @page:	struct page to free.
 * @order:	order of @page.
 *
 * Freed pages are either returned to buddy allocator or held back to be used
 * as metadata pages.
 */
bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);

/**
 * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
 * @page:  struct page pointer returned by alloc_pages().
 * @order: order of allocated struct page.
 * @flags: GFP flags used by alloc_pages()
 *
 * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
 * @flags contain __GFP_ZERO.
 */
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);

/**
 * kmsan_free_page() - Notify KMSAN about a free_pages() call.
 * @page:  struct page pointer passed to free_pages().
 * @order: order of deallocated struct page.
 *
 * KMSAN marks freed memory as uninitialized.
 */
void kmsan_free_page(struct page *page, unsigned int order);

/**
 * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
 * @dst: destination page.
 * @src: source page.
 *
 * KMSAN copies the contents of metadata pages for @src into the metadata pages
 * for @dst. If @dst has no associated metadata pages, nothing happens.
 * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
 */
void kmsan_copy_page_meta(struct page *dst, struct page *src);

/**
 * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
 * @s:      slab cache the object belongs to.
 * @object: object pointer.
 * @flags:  GFP flags passed to the allocator.
 *
 * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
 * newly created object, marking it as initialized or uninitialized.
 */
void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);

/**
 * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
 * @s:      slab cache the object belongs to.
 * @object: object pointer.
 *
 * KMSAN marks the freed object as uninitialized.
 */
void kmsan_slab_free(struct kmem_cache *s, void *object);

/**
 * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
 * @ptr:   object pointer.
 * @size:  object size.
 * @flags: GFP flags passed to the allocator.
 *
 * Similar to kmsan_slab_alloc(), but for large allocations.
 */
void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);

/**
 * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
 * @ptr: object pointer.
 *
 * Similar to kmsan_slab_free(), but for large allocations.
 */
void kmsan_kfree_large(const void *ptr);

/**
 * kmsan_map_kernel_range_noflush() - Notify KMSAN about a vmap.
 * @start:	start of vmapped range.
 * @end:	end of vmapped range.
 * @prot:	page protection flags used for vmap.
 * @pages:	array of pages.
 * @page_shift:	page_shift passed to vmap_range_noflush().
 *
 * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
 * vmalloc metadata address range.
 */
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				    pgprot_t prot, struct page **pages,
				    unsigned int page_shift);

/**
 * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
 * @start: start of vunmapped range.
 * @end:   end of vunmapped range.
 *
 * KMSAN unmaps the contiguous metadata ranges created by
 * kmsan_map_kernel_range_noflush().
 */
void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);

/**
 * kmsan_ioremap_page_range() - Notify KMSAN about a ioremap_page_range() call.
 * @addr:	range start.
 * @end:	range end.
 * @phys_addr:	physical range start.
 * @prot:	page protection flags used for ioremap_page_range().
 * @page_shift:	page_shift argument passed to vmap_range_noflush().
 *
 * KMSAN creates new metadata pages for the physical pages mapped into the
 * virtual memory.
 */
void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
			      phys_addr_t phys_addr, pgprot_t prot,
			      unsigned int page_shift);

/**
 * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
 * @start: range start.
 * @end:   range end.
 *
 * KMSAN unmaps the metadata pages for the given range and, unlike for
 * vunmap_page_range(), also deallocates them.
 */
void kmsan_iounmap_page_range(unsigned long start, unsigned long end);

#else

static inline void kmsan_init_shadow(void)
{
}

static inline void kmsan_init_runtime(void)
{
}

static inline bool kmsan_memblock_free_pages(struct page *page,
					     unsigned int order)
{
	return true;
}

static inline void kmsan_task_create(struct task_struct *task)
{
}

static inline void kmsan_task_exit(struct task_struct *task)
{
}

static inline int kmsan_alloc_page(struct page *page, unsigned int order,
				   gfp_t flags)
{
	return 0;
}

static inline void kmsan_free_page(struct page *page, unsigned int order)
{
}

static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
}

static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
				    gfp_t flags)
{
}

static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
{
}

static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
				       gfp_t flags)
{
}

static inline void kmsan_kfree_large(const void *ptr)
{
}

static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
						  unsigned long end,
						  pgprot_t prot,
						  struct page **pages,
						  unsigned int page_shift)
{
}

static inline void kmsan_vunmap_range_noflush(unsigned long start,
					      unsigned long end)
{
}

static inline void kmsan_ioremap_page_range(unsigned long start,
					    unsigned long end,
					    phys_addr_t phys_addr,
					    pgprot_t prot,
					    unsigned int page_shift)
{
}

static inline void kmsan_iounmap_page_range(unsigned long start,
					    unsigned long end)
{
}

#endif

#endif /* _LINUX_KMSAN_H */