author	Danilo Krummrich <dakr@redhat.com>	2023-08-04 20:23:50 +0200
committer	Danilo Krummrich <dakr@redhat.com>	2023-08-04 20:34:39 +0200
commit	6b252cf42281045a9f803d2198023500cfa6ebd2 (patch)
tree	f3c5d3ad01638a449b6b41e7ef7e15e05f941654 /drivers/gpu/drm/nouveau/include
parent	7576c4ca6d817221688e985f20eecc1f0ebead93 (diff)
drm/nouveau: nvkm/vmm: implement raw ops to manage uvmm
The new VM_BIND UAPI uses the DRM GPU VA manager to manage the VA space. Hence, we need a way to manipulate the MMU's page tables without going through the internal range allocator implemented by nvkm/vmm.

This patch adds a raw interface for nvkm/vmm to pass the responsibility for managing the address space and the corresponding map/unmap/sparse operations to the upper layers.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230804182406.5222-11-dakr@redhat.com
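For context (not part of the patch), below is a minimal sketch of how an upper layer such as the new uvmm code might drive the raw interface. The oclass value, addresses, sizes, the 12-bit page shift and the empty map arguments are illustrative assumptions; only the nvif_vmm_ctor()/nvif_vmm_dtor()/nvif_vmm_raw_*() signatures and the RAW constructor type come from this series.

	/* Hypothetical caller sketch -- values below are placeholders, not
	 * taken from this patch. */
	#include <nvif/vmm.h>

	static int example_raw_map(struct nvif_mmu *mmu, struct nvif_mem *mem)
	{
		struct nvif_vmm vmm = {};
		const u64 addr = 0x100000, size = 0x10000;
		const u8 shift = 12; /* 4KiB pages (assumed) */
		int ret;

		/* Create a VMM whose VA space is managed entirely by the
		 * caller (RAW); oclass 0 is a placeholder. */
		ret = nvif_vmm_ctor(mmu, "rawvmm", 0, RAW,
				    0, 0, NULL, 0, &vmm);
		if (ret)
			return ret;

		/* Reference the page tables backing [addr, addr + size). */
		ret = nvif_vmm_raw_get(&vmm, addr, size, shift);
		if (ret)
			goto dtor;

		/* Map the backing memory at the caller-chosen VA; real users
		 * would pass a hardware-specific map argument via argv/argc. */
		ret = nvif_vmm_raw_map(&vmm, addr, size, shift, NULL, 0, mem, 0);
		if (ret)
			goto put;

		/* ... use the mapping ... */

		nvif_vmm_raw_unmap(&vmm, addr, size, shift, false);
	put:
		nvif_vmm_raw_put(&vmm, addr, size, shift);
	dtor:
		nvif_vmm_dtor(&vmm);
		return ret;
	}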
Diffstat (limited to 'drivers/gpu/drm/nouveau/include')
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvif/if000c.h	26
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvif/vmm.h	19
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h	20
3 files changed, 61 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
index 9c7ff56831c5..a5a182b3c28d 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if000c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -3,7 +3,10 @@
struct nvif_vmm_v0 {
__u8 version;
__u8 page_nr;
- __u8 managed;
+#define NVIF_VMM_V0_TYPE_UNMANAGED 0x00
+#define NVIF_VMM_V0_TYPE_MANAGED 0x01
+#define NVIF_VMM_V0_TYPE_RAW 0x02
+ __u8 type;
__u8 pad03[5];
__u64 addr;
__u64 size;
@@ -17,6 +20,7 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP 0x04
#define NVIF_VMM_V0_PFNMAP 0x05
#define NVIF_VMM_V0_PFNCLR 0x06
+#define NVIF_VMM_V0_RAW 0x07
#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)
struct nvif_vmm_page_v0 {
@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 {
__u64 addr;
};
+struct nvif_vmm_raw_v0 {
+ __u8 version;
+#define NVIF_VMM_RAW_V0_GET 0x0
+#define NVIF_VMM_RAW_V0_PUT 0x1
+#define NVIF_VMM_RAW_V0_MAP 0x2
+#define NVIF_VMM_RAW_V0_UNMAP 0x3
+#define NVIF_VMM_RAW_V0_SPARSE 0x4
+ __u8 op;
+ __u8 sparse;
+ __u8 ref;
+ __u8 shift;
+ __u32 argc;
+ __u8 pad01[7];
+ __u64 addr;
+ __u64 size;
+ __u64 offset;
+ __u64 memory;
+ __u64 argv;
+};
+
struct nvif_vmm_pfnmap_v0 {
__u8 version;
__u8 page;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
index a2ee92201ace..0ecedd0ee0a5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/vmm.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -4,6 +4,12 @@
struct nvif_mem;
struct nvif_mmu;
+enum nvif_vmm_type {
+ UNMANAGED,
+ MANAGED,
+ RAW,
+};
+
enum nvif_vmm_get {
ADDR,
PTES,
@@ -30,8 +36,9 @@ struct nvif_vmm {
int page_nr;
};
-int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed,
- u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *);
+int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass,
+ enum nvif_vmm_type, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_vmm *);
void nvif_vmm_dtor(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *);
@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64);
+
+int nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
+int nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
+int nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
+ void *argv, u32 argc, struct nvif_mem *mem, u64 offset);
+int nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
+ u8 shift, bool sparse);
+int nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 70e7887ef4b4..2fd2f2433fc7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -17,6 +17,7 @@ struct nvkm_vma {
bool part:1; /* Region was split from an allocated region by map(). */
bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */
+ bool no_comp:1; /* Force no memory compression. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */
};
@@ -27,10 +28,26 @@ struct nvkm_vmm {
const char *name;
u32 debug;
struct kref kref;
- struct mutex mutex;
+
+ struct {
+ struct mutex vmm;
+ struct mutex ref;
+ struct mutex map;
+ } mutex;
u64 start;
u64 limit;
+ struct {
+ struct {
+ u64 addr;
+ u64 size;
+ } p;
+ struct {
+ u64 addr;
+ u64 size;
+ } n;
+ bool raw;
+ } managed;
struct nvkm_vmm_pt *pd;
struct list_head join;
@@ -70,6 +87,7 @@ struct nvkm_vmm_map {
const struct nvkm_vmm_page *page;
+ bool no_comp;
struct nvkm_tags *tags;
u64 next;
u64 type;