Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h                   |   1
-rw-r--r--  include/asm-generic/mshyperv.h            |   1
-rw-r--r--  include/asm-generic/tlb.h                 |  10
-rw-r--r--  include/drm/bridge/dw_hdmi.h              |  11
-rw-r--r--  include/drm/dp/drm_dp_helper.h            |   1
-rw-r--r--  include/drm/drm_atomic.h                  |  27
-rw-r--r--  include/drm/drm_edid.h                    |  12
-rw-r--r--  include/drm/drm_file.h                    |   2
-rw-r--r--  include/drm/drm_format_helper.h           |   5
-rw-r--r--  include/drm/drm_gem.h                     |   5
-rw-r--r--  include/drm/drm_mipi_dsi.h                |   2
-rw-r--r--  include/drm/drm_modes.h                   |   2
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h  |   2
-rw-r--r--  include/drm/gpu_scheduler.h               |   1
-rw-r--r--  include/drm/ttm/ttm_bo_api.h              |  64
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h           |  40
-rw-r--r--  include/drm/ttm/ttm_device.h              |  11
-rw-r--r--  include/drm/ttm/ttm_resource.h            |  77
-rw-r--r--  include/drm/ttm/ttm_tt.h                  |   4
-rw-r--r--  include/linux/bpf_verifier.h              |   4
-rw-r--r--  include/linux/dma-buf.h                   |  26
-rw-r--r--  include/linux/dma-fence-array.h           |  19
-rw-r--r--  include/linux/dma-fence-chain.h           |   2
-rw-r--r--  include/linux/dma-fence-unwrap.h          |  95
-rw-r--r--  include/linux/dma-resv.h                  | 233
-rw-r--r--  include/linux/efi.h                       |   4
-rw-r--r--  include/linux/fb.h                        |   1
-rw-r--r--  include/linux/gfp.h                       |   8
-rw-r--r--  include/linux/gpio/driver.h               |   9
-rw-r--r--  include/linux/kobject.h                   |   1
-rw-r--r--  include/linux/local_lock_internal.h       |   6
-rw-r--r--  include/linux/mmc/core.h                  |   2
-rw-r--r--  include/linux/mmzone.h                    |  11
-rw-r--r--  include/linux/nfs_xdr.h                   |   1
-rw-r--r--  include/linux/seqlock.h                   |   8
-rw-r--r--  include/linux/static_call.h               |  48
-rw-r--r--  include/linux/sunrpc/xprt.h               |   5
-rw-r--r--  include/linux/virtio_config.h             |   6
-rw-r--r--  include/net/mctp.h                        |   2
-rw-r--r--  include/trace/events/sunrpc.h             |   1
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h             |   9
41 files changed, 461 insertions, 318 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 3f7f01f03869..c4b78c21d793 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -509,7 +509,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
* External Functions
*/
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index c08758b6b364..c05d2ce9b6cd 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -269,6 +269,7 @@ bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
void *hv_map_memory(void *addr, unsigned long size);
void hv_unmap_memory(void *addr);
#else /* CONFIG_HYPERV */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index fd7feb5c7894..eee6f7763a39 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -565,10 +565,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
- if (_sz == PMD_SIZE) \
- tlb_flush_pmd_range(tlb, address, _sz); \
- else if (_sz == PUD_SIZE) \
+ if (_sz >= P4D_SIZE) \
+ tlb_flush_p4d_range(tlb, address, _sz); \
+ else if (_sz >= PUD_SIZE) \
tlb_flush_pud_range(tlb, address, _sz); \
+ else if (_sz >= PMD_SIZE) \
+ tlb_flush_pmd_range(tlb, address, _sz); \
+ else \
+ tlb_flush_pte_range(tlb, address, _sz); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 2a1f85f9a8a3..f668e75fbabe 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -143,6 +143,11 @@ struct dw_hdmi_plat_data {
const struct drm_display_info *info,
const struct drm_display_mode *mode);
+ /* Platform-specific audio enable/disable (optional) */
+ void (*enable_audio)(struct dw_hdmi *hdmi, int channel,
+ int width, int rate, int non_pcm);
+ void (*disable_audio)(struct dw_hdmi *hdmi);
+
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
const char *phy_name;
@@ -173,6 +178,8 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev);
+void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm);
+void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status);
@@ -187,9 +194,11 @@ void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
unsigned char addr);
+void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi);
+
void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable);
void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable);
-void dw_hdmi_phy_reset(struct dw_hdmi *hdmi);
+void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi);
enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
void *data);
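
A minimal sketch of how platform glue might wire the new audio hooks, forwarding to the existing dw-hdmi core helpers; the foo_ names are illustrative and not part of this change:

#include <drm/bridge/dw_hdmi.h>

static void foo_hdmi_enable_audio(struct dw_hdmi *hdmi, int channel,
                                  int width, int rate, int non_pcm)
{
        /* Push the stream parameters into the dw-hdmi core, then unmute. */
        dw_hdmi_set_sample_non_pcm(hdmi, non_pcm);
        dw_hdmi_set_sample_width(hdmi, width);
        dw_hdmi_set_sample_rate(hdmi, rate);
        dw_hdmi_set_channel_count(hdmi, channel);
        dw_hdmi_audio_enable(hdmi);
}

static void foo_hdmi_disable_audio(struct dw_hdmi *hdmi)
{
        dw_hdmi_audio_disable(hdmi);
}

static const struct dw_hdmi_plat_data foo_hdmi_plat_data = {
        .enable_audio   = foo_hdmi_enable_audio,
        .disable_audio  = foo_hdmi_disable_audio,
        /* phy_ops, mode_valid and the rest of the plat_data as before */
};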
diff --git a/include/drm/dp/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h
index 1eccd9741943..91af98e6617c 100644
--- a/include/drm/dp/drm_dp_helper.h
+++ b/include/drm/dp/drm_dp_helper.h
@@ -2053,6 +2053,7 @@ struct drm_dp_aux {
bool is_remote;
};
+int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
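
A hedged usage sketch for the new probe helper: make a light-weight DPCD access to confirm the sink responds before issuing a bulk read. foo_read_sink_caps and its error handling are illustrative only:

#include <drm/dp/drm_dp_helper.h>

static int foo_read_sink_caps(struct drm_dp_aux *aux,
                              u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
        ssize_t len;
        int ret;

        /* Cheap access that only checks the sink ACKs DPCD reads. */
        ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
        if (ret < 0)
                return ret;

        len = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
        return len < 0 ? len : 0;
}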
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 1701c2128a5c..0777725085df 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -227,6 +227,18 @@ struct drm_private_state_funcs {
*/
void (*atomic_destroy_state)(struct drm_private_obj *obj,
struct drm_private_state *state);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses &struct drm_private_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_private_obj_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_private_state *state);
};
/**
@@ -311,14 +323,21 @@ struct drm_private_obj {
/**
* struct drm_private_state - base struct for driver private object state
- * @state: backpointer to global drm_atomic_state
*
- * Currently only contains a backpointer to the overall atomic update, but in
- * the future also might hold synchronization information similar to e.g.
- * &drm_crtc.commit.
+ * Currently only contains a backpointer to the overall atomic update,
+ * and the relevant private object but in the future also might hold
+ * synchronization information similar to e.g. &drm_crtc.commit.
*/
struct drm_private_state {
+ /**
+ * @state: backpointer to global drm_atomic_state
+ */
struct drm_atomic_state *state;
+
+ /**
+ * @obj: backpointer to the private object
+ */
+ struct drm_private_obj *obj;
};
struct __drm_private_objs_state {
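
A sketch of how a driver subclassing drm_private_state might implement the new hook and use the new obj backpointer; foo_atomic_print_state and foo_state_funcs are invented for illustration:

#include <drm/drm_atomic.h>
#include <drm/drm_print.h>

static void foo_atomic_print_state(struct drm_printer *p,
                                   const struct drm_private_state *state)
{
        /* Normally the driver downcasts to its own state struct here and
         * prints its members; printing the new backpointer keeps the
         * sketch self-contained. Invoked via
         * drm_atomic_private_obj_print_state(), never directly. */
        drm_printf(p, "\tprivate obj: %p\n", state->obj);
}

static const struct drm_private_state_funcs foo_state_funcs = {
        /* .atomic_duplicate_state and .atomic_destroy_state as before */
        .atomic_print_state = foo_atomic_print_state,
};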
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 144c495b99c4..b7e170584000 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -372,8 +372,8 @@ struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
-int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
-int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
+int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads);
+int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
@@ -569,8 +569,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
int drm_add_override_edid_modes(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-bool drm_detect_hdmi_monitor(struct edid *edid);
-bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_detect_hdmi_monitor(const struct edid *edid);
+bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
@@ -578,11 +578,11 @@ int drm_add_modes_noedid(struct drm_connector *connector,
void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref);
-int drm_edid_header_is_valid(const u8 *raw_edid);
+int drm_edid_header_is_valid(const void *edid);
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt);
bool drm_edid_is_valid(struct edid *edid);
-void drm_edid_get_monitor_name(struct edid *edid, char *name,
+void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index a3acb7ac3550..e0a73a1e2df7 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -248,7 +248,7 @@ struct drm_file {
*/
struct drm_master *master;
- /** @master_lock: Serializes @master. */
+ /** @master_lookup_lock: Serializes @master. */
spinlock_t master_lookup_lock;
/** @pid: Process that opened this file. */
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 0b0937c0b2f6..55145eca0782 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -43,8 +43,7 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
const void *vmap, const struct drm_framebuffer *fb,
const struct drm_rect *rect);
-void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_mono(void *dst, unsigned int dst_pitch, const void *src,
+ const struct drm_framebuffer *fb, const struct drm_rect *clip);
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index e2941cee14b6..9d7c61a122dc 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -407,11 +407,6 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
-int drm_gem_fence_array_add(struct xarray *fence_array,
- struct dma_fence *fence);
-int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
- struct drm_gem_object *obj,
- bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 147e51b6d241..51e09a1a106a 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -137,6 +137,8 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
/* transmit data in low power */
#define MIPI_DSI_MODE_LPM BIT(11)
+/* transmit data ending at the same time for all lanes within one hsync */
+#define MIPI_DSI_HS_PKT_END_ALIGNED BIT(12)
enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB888,
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 2fa6b2c33b3f..a80ae9639e96 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -492,6 +492,8 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
void drm_mode_copy(struct drm_display_mode *dst,
const struct drm_display_mode *src);
+void drm_mode_init(struct drm_display_mode *dst,
+ const struct drm_display_mode *src);
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
bool drm_mode_match(const struct drm_display_mode *mode1,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index fdfa9f37ce05..fafa70ac1337 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1384,7 +1384,7 @@ struct drm_mode_config_helper_funcs {
* starting to commit the update to the hardware.
*
* After the atomic update is committed to the hardware this hook needs
- * to call drm_atomic_helper_commit_hw_done(). Then wait for the upate
+ * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
* to be executed by the hardware, for example using
* drm_atomic_helper_wait_for_vblanks() or
* drm_atomic_helper_wait_for_flip_done(), and then clean up the old
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 944f83ef9f2e..0fca8f38bee4 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -270,6 +270,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
+ * @work: Helper to reschedule job kill to a different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 155b19ee12fb..2d524f8b0802 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -55,8 +55,6 @@ struct ttm_placement;
struct ttm_place;
-struct ttm_lru_bulk_move;
-
/**
* enum ttm_bo_type
*
@@ -94,10 +92,8 @@ struct ttm_tt;
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
* @deleted: True if the object is only a zombie and already deleted.
- * @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
@@ -138,19 +134,18 @@ struct ttm_buffer_object {
struct ttm_resource *resource;
struct ttm_tt *ttm;
bool deleted;
+ struct ttm_lru_bulk_move *bulk_move;
/**
* Members protected by the bdev::lru_lock.
*/
- struct list_head lru;
struct list_head ddestroy;
/**
* Members protected by a bo reservation.
*/
- struct dma_fence *moving;
unsigned priority;
unsigned pin_count;
@@ -291,30 +286,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
void ttm_bo_put(struct ttm_buffer_object *bo);
-/**
- * ttm_bo_move_to_lru_tail
- *
- * @bo: The buffer object.
- * @mem: Resource object.
- * @bulk: optional bulk move structure to remember BO positions
- *
- * Move this BO to the tail of all lru lists used to lookup and reserve an
- * object. This function must be called with struct ttm_global::lru_lock
- * held, and is used to make a BO less likely to be considered for eviction.
- */
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
- struct ttm_resource *mem,
- struct ttm_lru_bulk_move *bulk);
-
-/**
- * ttm_bo_bulk_move_lru_tail
- *
- * @bulk: bulk move structure
- *
- * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_global::lru_lock held.
- */
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk);
/**
* ttm_bo_lock_delayed_workqueue
@@ -540,34 +514,8 @@ ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
-/**
- * ttm_bo_pin - Pin the buffer object.
- * @bo: The buffer object to pin
- *
- * Make sure the buffer is not evicted any more during memory pressure.
- */
-static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
-{
- dma_resv_assert_held(bo->base.resv);
- WARN_ON_ONCE(!kref_read(&bo->kref));
- ++bo->pin_count;
-}
-
-/**
- * ttm_bo_unpin - Unpin the buffer object.
- * @bo: The buffer object to unpin
- *
- * Allows the buffer object to be evicted again during memory pressure.
- */
-static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
-{
- dma_resv_assert_held(bo->base.resv);
- WARN_ON_ONCE(!kref_read(&bo->kref));
- if (bo->pin_count)
- --bo->pin_count;
- else
- WARN_ON_ONCE(true);
-}
+void ttm_bo_pin(struct ttm_buffer_object *bo);
+void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_mem_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man,
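
A sketch of the reworked bulk-move flow seen from a driver, loosely following a per-VM grouping of BOs; struct foo_vm and the foo_ functions are assumptions, not part of this change:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_resource.h>

struct foo_vm {
        struct ttm_lru_bulk_move lru_bulk_move;
};

static void foo_vm_init(struct foo_vm *vm)
{
        ttm_lru_bulk_move_init(&vm->lru_bulk_move);
}

/* Called with bo->base.resv held: track the BO in the VM's bulk move. */
static void foo_vm_track_bo(struct foo_vm *vm, struct ttm_buffer_object *bo)
{
        ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
}

/* After command submission, bump every tracked resource in one go. */
static void foo_vm_bump_lru(struct ttm_device *bdev, struct foo_vm *vm)
{
        spin_lock(&bdev->lru_lock);
        ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
        spin_unlock(&bdev->lru_lock);
}

Compared with the removed ttm_bo_move_to_lru_tail(bo, mem, bulk) form, the bulk structure is attached once per BO and the per-resource LRU bookkeeping now lives in ttm_resource.h.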
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 5f087575194b..897b88f0bd59 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -45,33 +45,6 @@
#include "ttm_tt.h"
#include "ttm_pool.h"
-/**
- * struct ttm_lru_bulk_move_pos
- *
- * @first: first BO in the bulk move range
- * @last: last BO in the bulk move range
- *
- * Positions for a lru bulk move.
- */
-struct ttm_lru_bulk_move_pos {
- struct ttm_buffer_object *first;
- struct ttm_buffer_object *last;
-};
-
-/**
- * struct ttm_lru_bulk_move
- *
- * @tt: first/last lru entry for BOs in the TT domain
- * @vram: first/last lru entry for BOs in the VRAM domain
- * @swap: first/last lru entry for BOs on the swap list
- *
- * Helper structure for bulk moves on the LRU list.
- */
-struct ttm_lru_bulk_move {
- struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
- struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
-};
-
/*
* ttm_bo.c
*/
@@ -182,7 +155,7 @@ static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
+ ttm_bo_move_to_lru_tail(bo);
spin_unlock(&bo->bdev->lru_lock);
}
@@ -272,7 +245,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem);
/**
- * ttm_bo_move_accel_cleanup.
+ * ttm_bo_move_sync_cleanup.
*
* @bo: A pointer to a struct ttm_buffer_object.
* @new_mem: struct ttm_resource indicating where to move.
@@ -280,13 +253,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
* by the caller to be idle. Typically used after memcpy buffer moves.
*/
-static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem)
-{
- int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);
-
- WARN_ON(ret);
-}
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem);
/**
* ttm_bo_pipeline_gutting.
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 0a4ddec78d8f..95b3c04b1ab9 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -30,8 +30,6 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_pool.h>
-#define TTM_NUM_MEM_TYPES 8
-
struct ttm_device;
struct ttm_placement;
struct ttm_buffer_object;
@@ -201,15 +199,6 @@ struct ttm_device_funcs {
void *buf, int len, int write);
/**
- * struct ttm_bo_driver member del_from_lru_notify
- *
- * @bo: the buffer object deleted from lru
- *
- * notify driver that a BO was deleted from LRU.
- */
- void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
-
- /**
* Notify the driver that we're about to release a BO
*
* @bo: BO that is about to be released
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 0f1d55262c68..441653693970 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -26,14 +26,17 @@
#define _TTM_RESOURCE_H_
#include <linux/types.h>
+#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/iosys-map.h>
#include <linux/dma-fence.h>
+
#include <drm/drm_print.h>
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>
#define TTM_MAX_BO_PRIORITY 4U
+#define TTM_NUM_MEM_TYPES 8
struct ttm_device;
struct ttm_resource_manager;
@@ -178,6 +181,47 @@ struct ttm_resource {
uint32_t placement;
struct ttm_bus_placement bus;
struct ttm_buffer_object *bo;
+
+ /**
+ * @lru: Least recently used list, see &ttm_resource_manager.lru
+ */
+ struct list_head lru;
+};
+
+/**
+ * struct ttm_resource_cursor
+ *
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ unsigned int priority;
+};
+
+/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first res in the bulk move range
+ * @last: last res in the bulk move range
+ *
+ * Range of resources for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+ struct ttm_resource *first;
+ struct ttm_resource *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @pos: first/last lru entry for resources in the each domain/priority
+ *
+ * Container for the current bulk move state. Should be used with
+ * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
+ */
+struct ttm_lru_bulk_move {
+ struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
};
/**
@@ -266,6 +310,15 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
man->move = NULL;
}
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res);
+void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource *res);
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
+
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res);
@@ -292,6 +345,26 @@ uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor);
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_manager *man,
+ struct ttm_resource_cursor *cursor,
+ struct ttm_resource *res);
+
+/**
+ * ttm_resource_manager_for_each_res - iterate over all resources
+ * @man: the resource manager
+ * @cursor: struct ttm_resource_cursor for the current position
+ * @res: the current resource
+ *
+ * Iterate over all the evictable resources in a resource manager.
+ */
+#define ttm_resource_manager_for_each_res(man, cursor, res) \
+ for (res = ttm_resource_manager_first(man, cursor); res; \
+ res = ttm_resource_manager_next(man, cursor, res))
+
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
struct io_mapping *iomap,
@@ -308,4 +381,8 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
struct ttm_device *bdev,
struct ttm_resource *mem);
+
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+ struct dentry * parent,
+ const char *name);
#endif
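
A small sketch of the new resource iterator; it assumes the caller already holds bdev->lru_lock, and foo_count_resources is purely illustrative:

#include <drm/ttm/ttm_resource.h>

static unsigned int foo_count_resources(struct ttm_resource_manager *man)
{
        struct ttm_resource_cursor cursor;
        struct ttm_resource *res;
        unsigned int count = 0;

        /* Walks all evictable resources, priority by priority. */
        ttm_resource_manager_for_each_res(man, &cursor, res)
                count++;

        return count;
}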
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index f20832139815..17a0310e8aaa 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -140,6 +140,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
* @bo: The buffer object we create the ttm for.
* @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
* @caching: the desired caching state of the pages
+ * @extra_pages: Extra pages needed for the driver.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
@@ -147,7 +148,8 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
* NULL: Out of memory.
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags, enum ttm_caching caching);
+ uint32_t page_flags, enum ttm_caching caching,
+ unsigned long extra_pages);
int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c1fc4af47f69..3a9d2d7cc6b7 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -570,9 +570,11 @@ static inline u32 type_flag(u32 type)
return type & ~BPF_BASE_TYPE_MASK;
}
+/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
{
- return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
+ return prog->type == BPF_PROG_TYPE_EXT ?
+ prog->aux->dst_prog->type : prog->type;
}
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 2097760e8e95..71731796c8c3 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -393,27 +393,30 @@ struct dma_buf {
* e.g. exposed in `Implicit Fence Poll Support`_ must follow the
* below rules.
*
- * - Drivers must add a shared fence through dma_resv_add_shared_fence()
- * for anything the userspace API considers a read access. This highly
- * depends upon the API and window system.
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
*
- * - Similarly drivers must set the exclusive fence through
- * dma_resv_add_excl_fence() for anything the userspace API considers
- * write access.
+ * - Similarly drivers must add a write fence through
+ * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+ * anything the userspace API considers write access.
*
- * - Drivers may just always set the exclusive fence, since that only
+ * - Drivers may just always add a write fence, since that only
* causes unecessarily synchronization, but no correctness issues.
*
* - Some drivers only expose a synchronous userspace API with no
* pipelining across drivers. These do not set any fences for their
* access. An example here is v4l.
*
+ * - Driver should use dma_resv_usage_rw() when retrieving fences as
+ * dependency for implicit synchronization.
+ *
* DYNAMIC IMPORTER RULES:
*
* Dynamic importers, see dma_buf_attachment_is_dynamic(), have
* additional constraints on how they set up fences:
*
- * - Dynamic importers must obey the exclusive fence and wait for it to
+ * - Dynamic importers must obey the write fences and wait for them to
* signal before allowing access to the buffer's underlying storage
* through the device.
*
@@ -423,10 +426,9 @@ struct dma_buf {
*
* IMPORTANT:
*
- * All drivers must obey the struct dma_resv rules, specifically the
- * rules for updating fences, see &dma_resv.fence_excl and
- * &dma_resv.fence. If these dependency rules are broken access tracking
- * can be lost resulting in use after free issues.
+ * All drivers and memory management related functions must obey the
+ * struct dma_resv rules, specifically the rules for updating and
+ * obeying fences. See enum dma_resv_usage for further descriptions.
*/
struct dma_resv *resv;
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index fec374f69e12..ec7f25def392 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -61,6 +61,21 @@ to_dma_fence_array(struct dma_fence *fence)
return container_of(fence, struct dma_fence_array, base);
}
+/**
+ * dma_fence_array_for_each - iterate over all fences in array
+ * @fence: current fence
+ * @index: index into the array
+ * @head: potential dma_fence_array object
+ *
+ * Test if @head is a dma_fence_array object and if so iterate over all fences
+ * in the array. If not, just iterate over the fence in @head itself.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_array_for_each(fence, index, head) \
+ for (index = 0, fence = dma_fence_array_first(head); fence; \
+ ++(index), fence = dma_fence_array_next(head, index))
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
@@ -68,4 +83,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+struct dma_fence *dma_fence_array_first(struct dma_fence *head);
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index);
+
#endif /* __LINUX_DMA_FENCE_ARRAY_H */
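
A usage sketch for the new array iterator; it assumes the caller holds a reference on the fence passed in, which may be a plain fence or a dma_fence_array. foo_first_unsignaled is illustrative:

#include <linux/dma-fence-array.h>

static struct dma_fence *foo_first_unsignaled(struct dma_fence *f)
{
        struct dma_fence *fence;
        unsigned int index;

        /* For a plain fence this visits just @f itself. */
        dma_fence_array_for_each(fence, index, f)
                if (!dma_fence_is_signaled(fence))
                        return fence;

        return NULL;
}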
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 10d51bcdf7b7..4bdf0b96da28 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -112,6 +112,8 @@ static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
*
* Iterate over all fences in the chain. We keep a reference to the current
* fence while inside the loop which must be dropped when breaking out.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
*/
#define dma_fence_chain_for_each(iter, head) \
for (iter = dma_fence_get(head); iter; \
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
new file mode 100644
index 000000000000..77e335a1bcac
--- /dev/null
+++ b/include/linux/dma-fence-unwrap.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_UNWRAP_H
+#define __LINUX_DMA_FENCE_UNWRAP_H
+
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-array.h>
+
+/**
+ * struct dma_fence_unwrap - cursor into the container structure
+ *
+ * Should be used with dma_fence_unwrap_for_each() iterator macro.
+ */
+struct dma_fence_unwrap {
+ /**
+ * @chain: potential dma_fence_chain, but can be other fence as well
+ */
+ struct dma_fence *chain;
+ /**
+ * @array: potential dma_fence_array, but can be other fence as well
+ */
+ struct dma_fence *array;
+ /**
+ * @index: last returned index if @array is really a dma_fence_array
+ */
+ unsigned int index;
+};
+
+/* Internal helper to start new array iteration, don't use directly */
+static inline struct dma_fence *
+__dma_fence_unwrap_array(struct dma_fence_unwrap * cursor)
+{
+ cursor->array = dma_fence_chain_contained(cursor->chain);
+ cursor->index = 0;
+ return dma_fence_array_first(cursor->array);
+}
+
+/**
+ * dma_fence_unwrap_first - return the first fence from fence containers
+ * @head: the entrypoint into the containers
+ * @cursor: current position inside the containers
+ *
+ * Unwraps potential dma_fence_chain/dma_fence_array containers and return the
+ * first fence.
+ */
+static inline struct dma_fence *
+dma_fence_unwrap_first(struct dma_fence *head, struct dma_fence_unwrap *cursor)
+{
+ cursor->chain = dma_fence_get(head);
+ return __dma_fence_unwrap_array(cursor);
+}
+
+/**
+ * dma_fence_unwrap_next - return the next fence from a fence containers
+ * @cursor: current position inside the containers
+ *
+ * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return
+ * the next fence from them.
+ */
+static inline struct dma_fence *
+dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
+{
+ struct dma_fence *tmp;
+
+ ++cursor->index;
+ tmp = dma_fence_array_next(cursor->array, cursor->index);
+ if (tmp)
+ return tmp;
+
+ cursor->chain = dma_fence_chain_walk(cursor->chain);
+ return __dma_fence_unwrap_array(cursor);
+}
+
+/**
+ * dma_fence_unwrap_for_each - iterate over all fences in containers
+ * @fence: current fence
+ * @cursor: current position inside the containers
+ * @head: starting point for the iterator
+ *
+ * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
+ * potential fences in them. If @head is just a normal fence only that one is
+ * returned.
+ */
+#define dma_fence_unwrap_for_each(fence, cursor, head) \
+ for (fence = dma_fence_unwrap_first(head, cursor); fence; \
+ fence = dma_fence_unwrap_next(cursor))
+
+#endif
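
A usage sketch for the new deep-dive iterator, in the spirit of what a sync_file merge needs; foo_count_unsignaled is illustrative and runs the loop to completion so the cursor's internal chain reference is dropped:

#include <linux/dma-fence-unwrap.h>

static unsigned int foo_count_unsignaled(struct dma_fence *head)
{
        struct dma_fence_unwrap cursor;
        struct dma_fence *f;
        unsigned int unsignaled = 0;

        /* Visits every component fence behind chains and arrays. */
        dma_fence_unwrap_for_each(f, &cursor, head)
                if (!dma_fence_is_signaled(f))
                        unsignaled++;

        return unsignaled;
}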
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index afdfdfac729f..c8ccbc94d5d2 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -47,24 +47,89 @@
extern struct ww_class reservation_ww_class;
+struct dma_resv_list;
+
/**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
+ * when the dma_resv object is asked for fences for one use case the fences
+ * for the lower use case are returned as well.
+ *
+ * For example when asking for WRITE fences then the KERNEL fences are returned
+ * as well. Similar when asked for READ fences then both WRITE and KERNEL
+ * fences are returned as well.
*/
-struct dma_resv_list {
- struct rcu_head rcu;
- u32 shared_count, shared_max;
- struct dma_fence __rcu *shared[];
+enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers *always* must wait for those fences before accessing the
+ * resource protected by the dma_resv object. The only exception for
+ * that is when the resource is known to be locked down in place by
+ * pinning it previously.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
+ /**
+ * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit write dependency.
+ */
+ DMA_RESV_USAGE_WRITE,
+
+ /**
+ * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit read dependency.
+ */
+ DMA_RESV_USAGE_READ,
+
+ /**
+ * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+ *
+ * This should be used by submissions which don't want to participate in
+ * implicit synchronization.
+ *
+ * The most common case are preemption fences as well as page table
+ * updates and their TLB flushes.
+ */
+ DMA_RESV_USAGE_BOOKKEEP
};
/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+ /* This looks confusing at first sight, but is indeed correct.
+ *
+ * The rationale is that new write operations need to wait for the
+ * existing read and write operations to finish.
+ * But a new read operation only needs to wait for the existing write
+ * operations to finish.
+ */
+ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
+/**
* struct dma_resv - a reservation object manages fences for a buffer
*
- * There are multiple uses for this, with sometimes slightly different rules in
- * how the fence slots are used.
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
*
* One use is to synchronize cross-driver access to a struct dma_buf, either for
* dynamic buffer management or just to handle implicit synchronization between
@@ -91,62 +156,16 @@ struct dma_resv {
struct ww_mutex lock;
/**
- * @seq:
+ * @fences:
*
- * Sequence count for managing RCU read-side synchronization, allows
- * read-only access to @fence_excl and @fence while ensuring we take a
- * consistent snapshot.
- */
- seqcount_ww_mutex_t seq;
-
- /**
- * @fence_excl:
- *
- * The exclusive fence, if there is one currently.
+ * Array of fences which were added to the dma_resv object
*
- * There are two ways to update this fence:
- *
- * - First by calling dma_resv_add_excl_fence(), which replaces all
- * fences attached to the reservation object. To guarantee that no
- * fences are lost, this new fence must signal only after all previous
- * fences, both shared and exclusive, have signalled. In some cases it
- * is convenient to achieve that by attaching a struct dma_fence_array
- * with all the new and old fences.
- *
- * - Alternatively the fence can be set directly, which leaves the
- * shared fences unchanged. To guarantee that no fences are lost, this
- * new fence must signal only after the previous exclusive fence has
- * signalled. Since the shared fences are staying intact, it is not
- * necessary to maintain any ordering against those. If semantically
- * only a new access is added without actually treating the previous
- * one as a dependency the exclusive fences can be strung together
- * using struct dma_fence_chain.
- *
- * Note that actual semantics of what an exclusive or shared fence mean
- * is defined by the user, for reservation objects shared across drivers
- * see &dma_buf.resv.
- */
- struct dma_fence __rcu *fence_excl;
-
- /**
- * @fence:
- *
- * List of current shared fences.
- *
- * There are no ordering constraints of shared fences against the
- * exclusive fence slot. If a waiter needs to wait for all access, it
- * has to wait for both sets of fences to signal.
- *
- * A new fence is added by calling dma_resv_add_shared_fence(). Since
- * this often needs to be done past the point of no return in command
+ * A new fence is added by calling dma_resv_add_fence(). Since this
+ * often needs to be done past the point of no return in command
* submission it cannot fail, and therefore sufficient slots need to be
- * reserved by calling dma_resv_reserve_shared().
- *
- * Note that actual semantics of what an exclusive or shared fence mean
- * is defined by the user, for reservation objects shared across drivers
- * see &dma_buf.resv.
+ * reserved by calling dma_resv_reserve_fences().
*/
- struct dma_resv_list __rcu *fence;
+ struct dma_resv_list __rcu *fences;
};
/**
@@ -165,14 +184,14 @@ struct dma_resv_iter {
/** @obj: The dma_resv object we iterate over */
struct dma_resv *obj;
- /** @all_fences: If all fences should be returned */
- bool all_fences;
+ /** @usage: Return fences with this usage or lower. */
+ enum dma_resv_usage usage;
/** @fence: the currently handled fence */
struct dma_fence *fence;
- /** @seq: sequence number to check for modifications */
- unsigned int seq;
+ /** @fence_usage: the usage of the current fence */
+ enum dma_resv_usage fence_usage;
/** @index: index into the shared fences */
unsigned int index;
@@ -180,8 +199,8 @@ struct dma_resv_iter {
/** @fences: the shared fences; private, *MUST* not dereference */
struct dma_resv_list *fences;
- /** @shared_count: number of shared fences */
- unsigned int shared_count;
+ /** @num_fences: number of fences */
+ unsigned int num_fences;
/** @is_restarted: true if this is the first returned fence */
bool is_restarted;
@@ -196,14 +215,14 @@ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
* dma_resv_iter_begin - initialize a dma_resv_iter object
* @cursor: The dma_resv_iter object to initialize
* @obj: The dma_resv object which we want to iterate over
- * @all_fences: If all fences should be returned or just the exclusive one
+ * @usage: controls which fences to include, see enum dma_resv_usage.
*/
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
struct dma_resv *obj,
- bool all_fences)
+ enum dma_resv_usage usage)
{
cursor->obj = obj;
- cursor->all_fences = all_fences;
+ cursor->usage = usage;
cursor->fence = NULL;
}
@@ -220,14 +239,15 @@ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
}
/**
- * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * dma_resv_iter_usage - Return the usage of the current fence
* @cursor: the cursor of the current position
*
- * Returns true if the currently returned fence is the exclusive one.
+ * Returns the usage of the currently processed fence.
*/
-static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
- return cursor->index == 0;
+ return cursor->fence_usage;
}
/**
@@ -264,7 +284,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
* dma_resv_for_each_fence - fence iterator
* @cursor: a struct dma_resv_iter pointer
* @obj: a dma_resv object pointer
- * @all_fences: true if all fences should be returned
+ * @usage: controls which fences to return
* @fence: the current fence
*
* Iterate over the fences in a struct dma_resv object while holding the
@@ -273,8 +293,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
* valid as long as the lock is held and so no extra reference to the fence is
* taken.
*/
-#define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \
- for (dma_resv_iter_begin(cursor, obj, all_fences), \
+#define dma_resv_for_each_fence(cursor, obj, usage, fence) \
+ for (dma_resv_iter_begin(cursor, obj, usage), \
fence = dma_resv_iter_first(cursor); fence; \
fence = dma_resv_iter_next(cursor))
@@ -282,9 +302,9 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
#ifdef CONFIG_DEBUG_MUTEXES
-void dma_resv_reset_shared_max(struct dma_resv *obj);
+void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
-static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif
/**
@@ -430,51 +450,26 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
*/
static inline void dma_resv_unlock(struct dma_resv *obj)
{
- dma_resv_reset_shared_max(obj);
+ dma_resv_reset_max_fences(obj);
ww_mutex_unlock(&obj->lock);
}
-/**
- * dma_resv_excl_fence - return the object's exclusive fence
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Caller must either hold the objects
- * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
- * or one of the variants of each
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_excl_fence(struct dma_resv *obj)
-{
- return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
-}
-
-/**
- * dma_resv_shared_list - get the reservation object's shared fence list
- * @obj: the reservation object
- *
- * Returns the shared fence list. Caller must either hold the objects
- * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
- * or one of the variants of each
- */
-static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
-{
- return rcu_dereference_check(obj->fence, dma_resv_held(obj));
-}
-
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int dma_resv_get_fences(struct dma_resv *obj, bool write,
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+ struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout);
-bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
#endif /* _LINUX_RESERVATION_H */
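
A sketch of the usage-based API that replaces the old shared/exclusive entry points: reserve a slot, add a write fence, then walk the fences a new read access would have to wait for. foo_attach_and_scan, its locking pattern and the pr_debug() output are illustrative only:

#include <linux/dma-resv.h>
#include <linux/printk.h>

static int foo_attach_and_scan(struct dma_resv *resv, struct dma_fence *f)
{
        struct dma_resv_iter cursor;
        struct dma_fence *dep;
        int ret;

        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        /* Slots must be reserved before the point of no return. */
        ret = dma_resv_reserve_fences(resv, 1);
        if (!ret)
                dma_resv_add_fence(resv, f, DMA_RESV_USAGE_WRITE);

        /* dma_resv_usage_rw(false) == WRITE: what a new reader waits for. */
        dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(false), dep)
                pr_debug("dependency from fence context %llu\n", dep->context);

        dma_resv_unlock(resv);
        return ret;
}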
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ccd4d3f91c98..0cbbc4103632 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1329,10 +1329,6 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
}
#endif
-#ifdef CONFIG_SYSFB
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
-#else
-static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { }
-#endif
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 9a77ab615c36..f95da1af9ff6 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -450,7 +450,6 @@ struct fb_info {
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
- struct work_struct queue; /* Framebuffer event queue */
struct fb_pixmap pixmap; /* Image hardware mapper */
struct fb_pixmap sprite; /* Cursor hardware mapper */
struct fb_cmap cmap; /* Current cmap */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 761f8f1885c7..3e3d36fc2109 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -613,9 +613,11 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
#ifdef CONFIG_NUMA
struct page *alloc_pages(gfp_t gfp, unsigned int order);
struct folio *folio_alloc(gfp_t gfp, unsigned order);
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
bool hugepage);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages_vma(gfp_mask, order, vma, addr, true)
#else
@@ -627,8 +629,10 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
return __folio_alloc_node(gfp, order, numa_node_id());
}
-#define alloc_pages_vma(gfp_mask, order, vma, addr, false)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
alloc_pages(gfp_mask, order)
+#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
+ folio_alloc(gfp, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
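
A sketch of the new folio allocation helper in a hypothetical fault path; foo_alloc_fault_folio is illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct folio *foo_alloc_fault_folio(struct vm_area_struct *vma,
                                           unsigned long addr)
{
        /* Order-0 folio honouring the VMA's NUMA policy; no THP hint. */
        return vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
}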
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 98c93510640e..874aabd270c9 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -222,6 +222,15 @@ struct gpio_irq_chip {
bool per_parent_data;
/**
+ * @initialized:
+ *
+ * Flag to track GPIO chip irq member's initialization.
+ * This flag will make sure GPIO chip irq members are not used
+ * before they are initialized.
+ */
+ bool initialized;
+
+ /**
* @init_hw: optional routine to initialize hardware before
* an IRQ chip will be added. This is quite useful when
* a particular driver wants to clear IRQ related registers
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index c7b47399b36a..57fb972fea05 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -120,7 +120,6 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs; /* use default_groups instead */
const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 6d635e8306d6..975e33b793a7 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -44,9 +44,9 @@ static inline void local_lock_debug_init(local_lock_t *l)
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
-# define local_lock_acquire(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_release(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_debug_init(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
+static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 71101d1ec825..de5c64bbdb72 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -175,7 +175,7 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
int retries);
-int mmc_hw_reset(struct mmc_host *host);
+int mmc_hw_reset(struct mmc_card *card);
int mmc_sw_reset(struct mmc_host *host);
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
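
A sketch of an updated call site for the changed signature: the reset target is now the card rather than its host. foo_recover_card is illustrative:

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>

static int foo_recover_card(struct mmc_card *card)
{
        /* Previously this would have been mmc_hw_reset(card->host). */
        return mmc_hw_reset(card);
}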
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 962b14d403e8..46ffab808f03 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1397,13 +1397,16 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
+ unsigned long root = SECTION_NR_TO_ROOT(nr);
+
+ if (unlikely(root >= NR_SECTION_ROOTS))
+ return NULL;
+
#ifdef CONFIG_SPARSEMEM_EXTREME
- if (!mem_section)
+ if (!mem_section || !mem_section[root])
return NULL;
#endif
- if (!mem_section[SECTION_NR_TO_ROOT(nr)])
- return NULL;
- return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+ return &mem_section[root][nr & SECTION_ROOT_MASK];
}
extern size_t mem_section_usage_size(void);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 49ba486aea5f..2863e5a69c6a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1694,6 +1694,7 @@ struct nfs_unlinkdata {
struct nfs_renamedata {
struct nfs_renameargs args;
struct nfs_renameres res;
+ struct rpc_task task;
const struct cred *cred;
struct inode *old_dir;
struct dentry *old_dentry;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 37ded6b8fee6..3926e9027947 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -17,7 +17,6 @@
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
-#include <linux/ww_mutex.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
@@ -164,7 +163,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* static initializer or init function. This enables lockdep to validate
* that the write side critical section is properly serialized.
*
- * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
+ * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
*/
/*
@@ -184,7 +183,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
-#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex)
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
@@ -277,7 +275,6 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_s
SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
-SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))
/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -304,8 +301,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
- __seqprop_case((s), mutex, prop), \
- __seqprop_case((s), ww_mutex, prop))
+ __seqprop_case((s), mutex, prop))
#define seqprop_ptr(s) __seqprop(s, ptr)
#define seqprop_sequence(s) __seqprop(s, sequence)
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index 3e56a9751c06..df53bed9d71f 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -180,13 +180,13 @@ extern int static_call_text_reserved(void *start, void *end);
extern long __static_call_return0(void);
-#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
+#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = _func_init, \
+ .func = _func, \
.type = 1, \
}; \
- ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
@@ -196,6 +196,14 @@ extern long __static_call_return0(void);
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = __static_call_return0, \
+ .type = 1, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+
#define static_call_cond(name) (void)__static_call(name)
#define EXPORT_STATIC_CALL(name) \
@@ -217,12 +225,12 @@ extern long __static_call_return0(void);
static inline int static_call_init(void) { return 0; }
-#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
+#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = _func_init, \
+ .func = _func, \
}; \
- ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
@@ -231,6 +239,12 @@ static inline int static_call_init(void) { return 0; }
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = __static_call_return0, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
#define static_call_cond(name) (void)__static_call(name)
@@ -248,10 +262,7 @@ static inline int static_call_text_reserved(void *start, void *end)
return 0;
}
-static inline long __static_call_return0(void)
-{
- return 0;
-}
+extern long __static_call_return0(void);
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
@@ -281,11 +292,14 @@ static inline long __static_call_return0(void)
.func = _func_init, \
}
+#define DEFINE_STATIC_CALL(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, _func)
+
#define DEFINE_STATIC_CALL_NULL(name, _func) \
- DECLARE_STATIC_CALL(name, _func); \
- struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = NULL, \
- }
+ __DEFINE_STATIC_CALL(name, _func, NULL)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
static inline void __static_call_nop(void) { }
@@ -327,10 +341,4 @@ static inline int static_call_text_reserved(void *start, void *end)
#endif /* CONFIG_HAVE_STATIC_CALL */
-#define DEFINE_STATIC_CALL(name, _func) \
- __DEFINE_STATIC_CALL(name, _func, _func)
-
-#define DEFINE_STATIC_CALL_RET0(name, _func) \
- __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
-
#endif /* _LINUX_STATIC_CALL_H */
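
A sketch of the new RET0 variant: calls return 0 until a real implementation is installed with static_call_update(); the foo_ names are illustrative:

#include <linux/static_call.h>

static long foo_real_budget(void)
{
        return 128;
}

DEFINE_STATIC_CALL_RET0(foo_budget, foo_real_budget);

static long foo_query_budget(void)
{
        /* Returns 0 until foo_enable_budgeting() patches in the real call. */
        return static_call(foo_budget)();
}

static void foo_enable_budgeting(void)
{
        static_call_update(foo_budget, foo_real_budget);
}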
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 45a9530d3839..522bbf937957 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -144,7 +144,7 @@ struct rpc_xprt_ops {
unsigned short (*get_srcport)(struct rpc_xprt *xprt);
int (*buf_alloc)(struct rpc_task *task);
void (*buf_free)(struct rpc_task *task);
- void (*prepare_request)(struct rpc_rqst *req);
+ int (*prepare_request)(struct rpc_rqst *req);
int (*send_request)(struct rpc_rqst *req);
void (*wait_for_reply_request)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -358,10 +358,9 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_free_slot(struct rpc_xprt *xprt,
struct rpc_rqst *req);
-void xprt_request_prepare(struct rpc_rqst *req);
bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_request_enqueue_transmit(struct rpc_task *task);
-void xprt_request_enqueue_receive(struct rpc_task *task);
+int xprt_request_enqueue_receive(struct rpc_task *task);
void xprt_request_wait_receive(struct rpc_task *task);
void xprt_request_dequeue_xprt(struct rpc_task *task);
bool xprt_request_need_retransmit(struct rpc_task *task);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index dafdc7f48c01..b341dd62aa4d 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -23,8 +23,6 @@ struct virtio_shm_region {
* any of @get/@set, @get_status/@set_status, or @get_features/
* @finalize_features are NOT safe to be called from an atomic
* context.
- * @enable_cbs: enable the callbacks
- * vdev: the virtio_device
* @get: read the value of a configuration field
* vdev: the virtio_device
* offset: the offset of the configuration field
@@ -78,7 +76,6 @@ struct virtio_shm_region {
*/
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
- void (*enable_cbs)(struct virtio_device *vdev);
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,
@@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev)
{
unsigned status = dev->config->get_status(dev);
- if (dev->config->enable_cbs)
- dev->config->enable_cbs(dev);
-
BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
diff --git a/include/net/mctp.h b/include/net/mctp.h
index d37268fe6825..82800d521c3d 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -36,8 +36,6 @@ struct mctp_hdr {
#define MCTP_HDR_TAG_SHIFT 0
#define MCTP_HDR_TAG_MASK GENMASK(2, 0)
-#define MCTP_HEADER_MAXLEN 4
-
#define MCTP_INITIAL_DEFAULT_NET 1
static inline bool mctp_address_unicast(mctp_eid_t eid)
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 0f34f13ebd55..5337acfe1e9c 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1004,7 +1004,6 @@ DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
-DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
DECLARE_EVENT_CLASS(rpc_xprt_event,
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 8277644c1144..26549c86a91f 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2022 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -92,6 +92,12 @@ extern "C" {
*
* DRM_VMW_PARAM_SM5
* SM5 support is enabled.
+ *
+ * DRM_VMW_PARAM_GL43
+ * SM5.1+GL4.3 support is enabled.
+ *
+ * DRM_VMW_PARAM_DEVICE_ID
+ * PCI ID of the underlying SVGA device.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -111,6 +117,7 @@ extern "C" {
#define DRM_VMW_PARAM_SM4_1 14
#define DRM_VMW_PARAM_SM5 15
#define DRM_VMW_PARAM_GL43 16
+#define DRM_VMW_PARAM_DEVICE_ID 17
/**
* enum drm_vmw_handle_type - handle type for ref ioctls