From 50661eb1a2c88c0e50cb234cb117a7fbfe03b3e0 Mon Sep 17 00:00:00 2001
From: Felix Kuehling
Date: Wed, 3 Jan 2024 18:13:22 -0500
Subject: drm/amdgpu: Auto-validate DMABuf imports in compute VMs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

DMABuf imports in compute VMs are not wrapped in a kgd_mem object on the
process_info->kfd_bo_list. There is no explicit KFD API call to validate
them or add eviction fences to them.

This patch automatically validates and fences dynamic DMABuf imports when
they are added to a compute VM. Revalidation after evictions is handled in
the VM code.

v2:
* Renamed amdgpu_vm_validate_evicted_bos to amdgpu_vm_validate
* Eliminated evicted_user state, use evicted state for VM BOs and user BOs
* Fixed and simplified amdgpu_vm_fence_imports, depends on reserved BOs
* Moved dma_resv_reserve_fences for amdgpu_vm_fence_imports into
  amdgpu_vm_validate, outside the vm->status_lock
* Added dummy version of amdgpu_amdkfd_bo_validate_and_fence for builds
  without KFD

v4: Eliminate amdgpu_vm_fence_imports. It's not needed because the
reservation with its fences is shared with the export, as long as all
imports are from KFD, with the exports already reserved, validated and
fenced by the KFD restore worker.

v5: Reintroduced separate evicted_user state to simplify the state machine
and CS error handling when amdgpu_vm_validate is called without a ticket.

Signed-off-by: Felix Kuehling
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 49a5f1c73b3e..b53add20088a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -187,7 +187,34 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 	else
 		++bo_va->ref_count;
 	amdgpu_bo_unreserve(abo);
-	return 0;
+
+	/* Validate and add eviction fence to DMABuf imports with dynamic
+	 * attachment in compute VMs. Re-validation will be done by
+	 * amdgpu_vm_validate. Fences are on the reservation shared with the
+	 * export, which is currently required to be validated and fenced
+	 * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
+	 *
+	 * Nested locking below for the case that a GEM object is opened in
+	 * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
+	 * but not for export, this is a different lock class that cannot lead to
+	 * circular lock dependencies.
+ */ + if (!vm->is_compute_context || !vm->process_info) + return 0; + if (!obj->import_attach || + !dma_buf_is_dynamic(obj->import_attach->dmabuf)) + return 0; + mutex_lock_nested(&vm->process_info->lock, 1); + if (!WARN_ON(!vm->process_info->eviction_fence)) { + r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT, + &vm->process_info->eviction_fence->base); + if (r) + dev_warn(adev->dev, "%d: validate_and_fence failed: %d\n", + vm->task_info.pid, r); + } + mutex_unlock(&vm->process_info->lock); + + return r; } static void amdgpu_gem_object_close(struct drm_gem_object *obj, -- cgit v1.2.3-70-g09d2 From 00a11f977beb752186221679db2265a69118a5a7 Mon Sep 17 00:00:00 2001 From: Arunpravin Paneer Selvam Date: Thu, 11 Jan 2024 23:21:13 -0800 Subject: drm/amdgpu: Enable seq64 manager and fix bugs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Enable the seq64 mapping sequence. - Fix wflinfo va conflict and other bugs. v1: - The seq64 area needs to be included in the AMDGPU_VA_RESERVED_SIZE otherwise the areas will conflict with user space allocations (Alex) - It needs to be mapped read only in the user VM (Alex) v2: - Instead of just one define for TOP/BOTTOM reserved space separate them into two (Christian) - Fix the CPU and VA calculations and while at it also cleanup error handling and kerneldoc (Christian) Signed-off-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Arunpravin Paneer Selvam Reviewed-by: Christian König --- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 8 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 70 +++++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h | 9 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 ++- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5 +- 11 files changed, 68 insertions(+), 52 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 796fa6f1420b..b5ad56690a9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -30,7 +30,7 @@ uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) { uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT; - addr -= AMDGPU_VA_RESERVED_SIZE; + addr -= AMDGPU_VA_RESERVED_CSA_SIZE; addr = amdgpu_gmc_sign_extend(addr); return addr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index b53add20088a..22aeee8adb71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -709,10 +709,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, uint64_t vm_size; int r = 0; - if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { + if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) { dev_dbg(dev->dev, "va_address 0x%llx is in reserved area 0x%llx\n", - args->va_address, AMDGPU_VA_RESERVED_SIZE); + args->va_address, AMDGPU_VA_RESERVED_BOTTOM); return -EINVAL; } @@ -728,7 +728,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->va_address &= AMDGPU_GMC_HOLE_MASK; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; - vm_size -= AMDGPU_VA_RESERVED_SIZE; + vm_size -= AMDGPU_VA_RESERVED_TOP; if 
(args->va_address + args->map_size > vm_size) { dev_dbg(dev->dev, "va_address 0x%llx is in top reserved area 0x%llx\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 48496bb585c7..a2df3025a754 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -894,14 +894,14 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; - vm_size -= AMDGPU_VA_RESERVED_SIZE; + vm_size -= AMDGPU_VA_RESERVED_TOP; /* Older VCE FW versions are buggy and can handle only 40bits */ if (adev->vce.fw_version && adev->vce.fw_version < AMDGPU_VCE_FW_53_45) vm_size = min(vm_size, 1ULL << 40); - dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; + dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_BOTTOM; dev_info->virtual_address_max = min(vm_size, AMDGPU_GMC_HOLE_START); @@ -1379,6 +1379,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) goto error_vm; } + r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va); + if (r) + goto error_vm; + mutex_init(&fpriv->bo_list_lock); idr_init_base(&fpriv->bo_list_handles, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index da48b6da0107..1f3dfbb6bb16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1398,7 +1398,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev) goto error_fini; } - ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE; + ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM; r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data); if (r) { DRM_ERROR("failed to map ctx meta data\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c index 7a6a67275404..e9081a98cf81 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c @@ -35,14 +35,29 @@ * counters and VM updates. It has maximum count of 32768 64 bit slots. */ +/** + * amdgpu_seq64_get_va_base - Get the seq64 va base address + * + * @adev: amdgpu_device pointer + * + * Returns: + * va base address on success + */ +static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev) +{ + u64 addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT; + + addr -= AMDGPU_VA_RESERVED_TOP; + + return addr; +} + /** * amdgpu_seq64_map - Map the seq64 memory to VM * * @adev: amdgpu_device pointer * @vm: vm pointer * @bo_va: bo_va pointer - * @seq64_addr: seq64 vaddr start address - * @size: seq64 pool size * * Map the seq64 memory to the given VM. 
* @@ -50,11 +65,11 @@ * 0 on success or a negative error code on failure */ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo_va **bo_va, u64 seq64_addr, - uint32_t size) + struct amdgpu_bo_va **bo_va) { struct amdgpu_bo *bo; struct drm_exec exec; + u64 seq64_addr; int r; bo = adev->seq64.sbo; @@ -77,9 +92,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error; } - r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); + seq64_addr = amdgpu_seq64_get_va_base(adev); + r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE, + AMDGPU_PTE_READABLE); if (r) { DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r); amdgpu_vm_bo_del(adev, *bo_va); @@ -144,31 +159,25 @@ error: * amdgpu_seq64_alloc - Allocate a 64 bit memory * * @adev: amdgpu_device pointer - * @gpu_addr: allocated gpu VA start address - * @cpu_addr: allocated cpu VA start address + * @va: VA to access the seq in process address space + * @cpu_addr: CPU address to access the seq * * Alloc a 64 bit memory from seq64 pool. * * Returns: * 0 on success or a negative error code on failure */ -int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, - u64 **cpu_addr) +int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr) { unsigned long bit_pos; - u32 offset; bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem); + if (bit_pos >= adev->seq64.num_sem) + return -ENOSPC; - if (bit_pos < adev->seq64.num_sem) { - __set_bit(bit_pos, adev->seq64.used); - offset = bit_pos << 6; /* convert to qw offset */ - } else { - return -EINVAL; - } - - *gpu_addr = offset + AMDGPU_SEQ64_VADDR_START; - *cpu_addr = offset + adev->seq64.cpu_base_addr; + __set_bit(bit_pos, adev->seq64.used); + *va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev); + *cpu_addr = bit_pos + adev->seq64.cpu_base_addr; return 0; } @@ -177,20 +186,17 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, * amdgpu_seq64_free - Free the given 64 bit memory * * @adev: amdgpu_device pointer - * @gpu_addr: gpu start address to be freed + * @va: gpu start address to be freed * * Free the given 64 bit memory from seq64 pool. 
- * */ -void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr) +void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va) { - u32 offset; - - offset = gpu_addr - AMDGPU_SEQ64_VADDR_START; + unsigned long bit_pos; - offset >>= 6; - if (offset < adev->seq64.num_sem) - __clear_bit(offset, adev->seq64.used); + bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64); + if (bit_pos < adev->seq64.num_sem) + __clear_bit(bit_pos, adev->seq64.used); } /** @@ -229,7 +235,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev) * AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS * 64bit slots */ - r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE, + r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->seq64.sbo, NULL, (void **)&adev->seq64.cpu_base_addr); @@ -238,7 +244,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev) return r; } - memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE); + memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE); adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS; memset(&adev->seq64.used, 0, sizeof(adev->seq64.used)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h index 2196e72be508..4203b2ab318d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h @@ -25,10 +25,9 @@ #ifndef __AMDGPU_SEQ64_H__ #define __AMDGPU_SEQ64_H__ -#define AMDGPU_SEQ64_SIZE (2ULL << 20) -#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8)) -#define AMDGPU_SEQ64_VADDR_OFFSET 0x50000 -#define AMDGPU_SEQ64_VADDR_START (AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET) +#include "amdgpu_vm.h" + +#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_VA_RESERVED_SEQ64_SIZE / sizeof(u64)) struct amdgpu_seq64 { struct amdgpu_bo *sbo; @@ -42,7 +41,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev); int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr); void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr); int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size); + struct amdgpu_bo_va **bo_va); void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index bfbf59326ee1..ab820cf52668 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -358,7 +358,7 @@ static int setup_umsch_mm_test(struct amdgpu_device *adev, memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data)); - test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE; + test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM; r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va, test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data)); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67d12f86bfe4..666698a57192 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -136,7 +136,11 @@ struct amdgpu_mem_stats; #define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS) /* Reserve 2MB at top/bottom of address space for kernel use */ -#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) +#define AMDGPU_VA_RESERVED_CSA_SIZE (2ULL << 20) +#define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20) 
+#define AMDGPU_VA_RESERVED_BOTTOM (2ULL << 20) +#define AMDGPU_VA_RESERVED_TOP (AMDGPU_VA_RESERVED_SEQ64_SIZE + \ + AMDGPU_VA_RESERVED_CSA_SIZE) /* See vm_update_mode */ #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 42e103d7077d..229263e407e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -435,9 +435,10 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) WREG32(mmVM_PRT_CNTL, tmp); if (enable) { - uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; + uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >> + AMDGPU_GPU_PAGE_SHIFT; uint32_t high = adev->vm_manager.max_pfn - - (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT); + (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT); WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index efc16e580f1e..d95f719eec55 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -563,9 +563,10 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) WREG32(mmVM_PRT_CNTL, tmp); if (enable) { - uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; + uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >> + AMDGPU_GPU_PAGE_SHIFT; uint32_t high = adev->vm_manager.max_pfn - - (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT); + (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT); WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index ff4ae73d27ec..4eb0cccdb413 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -777,9 +777,10 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) WREG32(mmVM_PRT_CNTL, tmp); if (enable) { - uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; + uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >> + AMDGPU_GPU_PAGE_SHIFT; uint32_t high = adev->vm_manager.max_pfn - - (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT); + (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT); WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); -- cgit v1.2.3-70-g09d2
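As a quick illustration of the reserved-VA layout these patches establish, the standalone C sketch below models the address math only: the CSA occupies the last AMDGPU_VA_RESERVED_CSA_SIZE of the VM space, the seq64 pool sits in the AMDGPU_VA_RESERVED_SEQ64_SIZE directly below it (as returned by amdgpu_seq64_get_va_base), and each allocated seq64 slot is one u64 at bit_pos * sizeof(u64) from that base. The constants mirror the defines added to amdgpu_vm.h, but max_pfn is an assumed example value and the GMC hole / sign extension handled by the real driver is deliberately ignored, so this is an approximation, not driver code.

/* Illustrative model of the reserved-VA layout and seq64 slot addressing.
 * Assumptions: 48-bit VA space (made-up max_pfn), no GMC hole handling.
 */
#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT          12ULL
#define VA_RESERVED_CSA_SIZE    (2ULL << 20)   /* 2 MB for the CSA        */
#define VA_RESERVED_SEQ64_SIZE  (2ULL << 20)   /* 2 MB for seq64 slots    */
#define VA_RESERVED_BOTTOM      (2ULL << 20)   /* 2 MB kept at VA 0       */
#define VA_RESERVED_TOP         (VA_RESERVED_SEQ64_SIZE + VA_RESERVED_CSA_SIZE)

int main(void)
{
	/* Example only: a 48-bit address space has 2^36 4 KB pages. */
	uint64_t max_pfn = 1ULL << 36;
	uint64_t va_top  = max_pfn << GPU_PAGE_SHIFT;

	/* CSA sits in the last 2 MB, the seq64 pool in the 2 MB below it. */
	uint64_t csa_base   = va_top - VA_RESERVED_CSA_SIZE;
	uint64_t seq64_base = va_top - VA_RESERVED_TOP;

	/* User mappings are only accepted in [BOTTOM, va_top - TOP). */
	uint64_t user_lo = VA_RESERVED_BOTTOM;
	uint64_t user_hi = va_top - VA_RESERVED_TOP;

	/* A seq64 slot at bit position N is the N-th u64 in the pool. */
	unsigned long bit_pos = 5;
	uint64_t slot_va = seq64_base + bit_pos * sizeof(uint64_t);

	printf("user VA range : [0x%llx, 0x%llx)\n",
	       (unsigned long long)user_lo, (unsigned long long)user_hi);
	printf("csa base      : 0x%llx\n", (unsigned long long)csa_base);
	printf("seq64 base    : 0x%llx\n", (unsigned long long)seq64_base);
	printf("slot %lu VA   : 0x%llx\n", bit_pos,
	       (unsigned long long)slot_va);
	return 0;
}

The same arithmetic is what lets amdgpu_seq64_free recover the bit position from a VA ((va - base) / sizeof(u64)) and what keeps the pool clear of user-space allocations, since amdgpu_gem_va_ioctl now rejects mappings above vm_size - AMDGPU_VA_RESERVED_TOP.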