author     Dave Airlie <airlied@redhat.com>    2021-03-16 16:45:12 +1000
committer  Dave Airlie <airlied@redhat.com>    2021-03-16 17:08:46 +1000
commit     51c3b916a4d7e24b4918925965867fdd9bd8dd59 (patch)
tree       3257e3e0fda7fbb0fe1425177b0c661db1bfee63 /include/drm
parent     1e28eed17697bcf343c6743f0028cc3b5dd88bf0 (diff)
parent     762949bb1da78941b25e63f7e952af037eee15a9 (diff)
download   linux-51c3b916a4d7e24b4918925965867fdd9bd8dd59.tar.gz
Merge tag 'drm-misc-next-2021-03-03' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.13:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - %p4cc printk format modifier
  - atomic: introduce drm_crtc_commit_wait, rework atomic plane state
    helpers to take the drm_atomic_state structure
  - dma-buf: heaps rework to return a struct dma_buf
  - simple-kms: Add plane state helpers
  - ttm: debugfs support, removal of sysfs

Driver Changes:
  - Convert drivers to shadow plane helpers
  - arc: Move to drm/tiny
  - ast: cursor plane reworks
  - gma500: Remove TTM and medfield support
  - mxsfb: imx8mm support
  - panfrost: MMU IRQ handling rework
  - qxl: rework to better handle resources deallocation, locking
  - sun4i: Add alpha properties for UI and VI layers
  - vc4: RPi4 CEC support
  - vmwgfx: doc cleanup

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20210303100600.dgnkadonzuvfnu22@gilmour
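One of the core changes above, the %p4cc printk format modifier, lets DRM code print FourCC pixel formats directly. A hedged, illustrative sketch (the drm and fb variables are placeholders, not part of this patch); note that %p4cc takes a pointer to the 32-bit fourcc value:

	u32 format = fb->format->format;	/* placeholder framebuffer */

	drm_dbg_kms(drm, "framebuffer format %p4cc\n", &format);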
Diffstat (limited to 'include/drm')
-rw-r--r--include/drm/drm_atomic.h4
-rw-r--r--include/drm/drm_gem_atomic_helper.h113
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h7
-rw-r--r--include/drm/drm_gem_vram_helper.h6
-rw-r--r--include/drm/drm_modeset_helper_vtables.h31
-rw-r--r--include/drm/drm_plane.h25
-rw-r--r--include/drm/drm_simple_kms_helper.h29
-rw-r--r--include/drm/drm_vblank.h1
-rw-r--r--include/drm/gpu_scheduler.h23
-rw-r--r--include/drm/ttm/ttm_bo_api.h48
-rw-r--r--include/drm/ttm/ttm_bo_driver.h329
-rw-r--r--include/drm/ttm/ttm_device.h318
-rw-r--r--include/drm/ttm/ttm_memory.h95
-rw-r--r--include/drm/ttm/ttm_resource.h4
-rw-r--r--include/drm/ttm/ttm_tt.h15
15 files changed, 553 insertions, 495 deletions
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ce7023e9115d..ac5a28eff2c8 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -66,6 +66,8 @@
*
* For an implementation of how to use this look at
* drm_atomic_helper_setup_commit() from the atomic helper library.
+ *
+ * See also drm_crtc_commit_wait().
*/
struct drm_crtc_commit {
/**
@@ -436,6 +438,8 @@ static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit)
kref_put(&commit->ref, __drm_crtc_commit_free);
}
+int drm_crtc_commit_wait(struct drm_crtc_commit *commit);
+
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
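drm_crtc_commit_wait(), exported above, wraps the hw_done/flip_done waits that drivers previously open-coded on a &drm_crtc_commit. A minimal sketch of how a driver might wait for a CRTC's pending commit, assuming placeholder crtc_state and drm variables:

	struct drm_crtc_commit *commit = crtc_state->commit;

	if (commit) {
		int ret = drm_crtc_commit_wait(commit);

		if (ret)
			drm_err(drm, "waiting for commit failed: %d\n", ret);
	}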
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
new file mode 100644
index 000000000000..cfc5adee3d13
--- /dev/null
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DRM_GEM_ATOMIC_HELPER_H__
+#define __DRM_GEM_ATOMIC_HELPER_H__
+
+#include <linux/dma-buf-map.h>
+
+#include <drm/drm_plane.h>
+
+struct drm_simple_display_pipe;
+
+/*
+ * Plane Helpers
+ */
+
+int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state);
+int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/*
+ * Helpers for planes with shadow buffers
+ */
+
+/**
+ * struct drm_shadow_plane_state - plane state for planes with shadow buffers
+ *
+ * For planes that use a shadow buffer, struct drm_shadow_plane_state
+ * provides the regular plane state plus mappings of the shadow buffer
+ * into kernel address space.
+ */
+struct drm_shadow_plane_state {
+ /** @base: plane state */
+ struct drm_plane_state base;
+
+ /* Transitional state - do not export or duplicate */
+
+ /**
+ * @map: Mappings of the plane's framebuffer BOs in to kernel address space
+ *
+ * The memory mappings stored in map should be established in the plane's
+ * prepare_fb callback and removed in the cleanup_fb callback.
+ */
+ struct dma_buf_map map[4];
+};
+
+/**
+ * to_drm_shadow_plane_state - upcasts from struct drm_plane_state
+ * @state: the plane state
+ */
+static inline struct drm_shadow_plane_state *
+to_drm_shadow_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct drm_shadow_plane_state, base);
+}
+
+void drm_gem_reset_shadow_plane(struct drm_plane *plane);
+struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane);
+void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_plane_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_funcs to use the rsp helper functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_FUNCS \
+ .reset = drm_gem_reset_shadow_plane, \
+ .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \
+ .atomic_destroy_state = drm_gem_destroy_shadow_plane_state
+
+int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS -
+ * Initializes struct drm_plane_helper_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the rsp helper
+ * functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \
+ .prepare_fb = drm_gem_prepare_shadow_fb, \
+ .cleanup_fb = drm_gem_cleanup_shadow_fb
+
+int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *
+drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe);
+void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_simple_display_pipe_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_simple_display_pipe_funcs to use the rsp helper
+ * functions.
+ */
+#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \
+ .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \
+ .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \
+ .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \
+ .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \
+ .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state
+
+#endif /* __DRM_GEM_ATOMIC_HELPER_H__ */
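A driver that draws through a CPU-visible shadow buffer could wire these helpers up roughly as below; the my_* names are placeholders and the rest of the plane setup is omitted:

static const struct drm_plane_funcs my_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	DRM_GEM_SHADOW_PLANE_FUNCS,
};

static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
	.atomic_check	= my_plane_atomic_check,
	.atomic_update	= my_plane_atomic_update,
	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
};

In the atomic_update hook, to_drm_shadow_plane_state() on the plane state then exposes the kernel mapping of the framebuffer BO in map[0], as established by drm_gem_prepare_shadow_fb().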
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 6b013154911d..6bdffc7aa124 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -9,9 +9,6 @@ struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
-struct drm_plane;
-struct drm_plane_state;
-struct drm_simple_display_pipe;
#define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52)
@@ -44,8 +41,4 @@ int drm_gem_fb_afbc_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb);
-int drm_gem_fb_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state);
-int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index a4bac02249c2..288055d397d9 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -172,19 +172,19 @@ struct drm_vram_mm {
uint64_t vram_base;
size_t vram_size;
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
};
/**
* drm_vram_mm_of_bdev() - \
- Returns the container of type &struct ttm_bo_device for field bdev.
+ Returns the container of type &struct ttm_device for field bdev.
* @bdev: the TTM BO device
*
* Returns:
* The containing instance of &struct drm_vram_mm
*/
static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
- struct ttm_bo_device *bdev)
+ struct ttm_device *bdev)
{
return container_of(bdev, struct drm_vram_mm, bdev);
}
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index eb706342861d..f3a4b47b3986 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1179,7 +1179,7 @@ struct drm_plane_helper_funcs {
* members in the plane structure.
*
* Drivers which always have their buffers pinned should use
- * drm_gem_fb_prepare_fb() for this hook.
+ * drm_gem_plane_helper_prepare_fb() for this hook.
*
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
@@ -1233,9 +1233,8 @@ struct drm_plane_helper_funcs {
* NOTE:
*
* This function is called in the check phase of an atomic update. The
- * driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * driver is not allowed to change anything outside of the
+ * &drm_atomic_state update tracking structure.
*
* RETURNS:
*
@@ -1245,7 +1244,7 @@ struct drm_plane_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_update:
@@ -1263,7 +1262,7 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
*
@@ -1287,14 +1286,14 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_disable)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_check:
*
- * Drivers should set this function pointer to check if the plane state
- * can be updated in a async fashion. Here async means "not vblank
- * synchronized".
+ * Drivers should set this function pointer to check if the plane's
+ * atomic state can be updated in a async fashion. Here async means
+ * "not vblank synchronized".
*
* This hook is called by drm_atomic_async_check() to establish if a
* given update can be committed asynchronously, that is, if it can
@@ -1306,7 +1305,7 @@ struct drm_plane_helper_funcs {
* can not be applied in asynchronous manner.
*/
int (*atomic_async_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_update:
@@ -1322,11 +1321,9 @@ struct drm_plane_helper_funcs {
* update won't happen if there is an outstanding commit modifying
* the same plane.
*
- * Note that unlike &drm_plane_helper_funcs.atomic_update this hook
- * takes the new &drm_plane_state as parameter. When doing async_update
- * drivers shouldn't replace the &drm_plane_state but update the
- * current one with the new plane configurations in the new
- * plane_state.
+ * When doing async_update drivers shouldn't replace the
+ * &drm_plane_state but update the current one with the new plane
+ * configurations in the new plane_state.
*
* Drivers should also swap the framebuffers between current plane
* state (&drm_plane.state) and new_state.
@@ -1345,7 +1342,7 @@ struct drm_plane_helper_funcs {
* for deferring if needed, until a common solution is created.
*/
void (*atomic_async_update)(struct drm_plane *plane,
- struct drm_plane_state *new_state);
+ struct drm_atomic_state *state);
};
/**
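With the helper vtable now passing the full &drm_atomic_state, a driver fetches its own plane state from it; a hedged sketch (my_hw_set_plane() is a placeholder):

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/*
	 * Program the hardware from new_state; the previous state, if
	 * needed, comes from drm_atomic_get_old_plane_state(state, plane).
	 */
	my_hw_set_plane(plane, new_state);
}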
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8ef06ee1c8eb..1294610e84f4 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -79,8 +79,8 @@ struct drm_plane_state {
* preserved.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
- * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
+ * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb()
+ * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers.
*/
struct dma_fence *fence;
@@ -538,10 +538,14 @@ struct drm_plane_funcs {
*
* For compatibility with legacy userspace, only overlay planes are made
* available to userspace by default. Userspace clients may set the
- * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
* wish to receive a universal plane list containing all plane types. See also
* drm_for_each_legacy_plane().
*
+ * In addition to setting each plane's type, drivers need to setup the
+ * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy
+ * IOCTLs. See drm_crtc_init_with_planes().
+ *
* WARNING: The values of this enum is UABI since they're exposed in the "type"
* property.
*/
@@ -557,19 +561,20 @@ enum drm_plane_type {
/**
* @DRM_PLANE_TYPE_PRIMARY:
*
- * Primary planes represent a "main" plane for a CRTC. Primary planes
- * are the planes operated upon by CRTC modesetting and flipping
- * operations described in the &drm_crtc_funcs.page_flip and
- * &drm_crtc_funcs.set_config hooks.
+ * A primary plane attached to a CRTC is the most likely to be able to
+ * light up the CRTC when no scaling/cropping is used and the plane
+ * covers the whole CRTC.
*/
DRM_PLANE_TYPE_PRIMARY,
/**
* @DRM_PLANE_TYPE_CURSOR:
*
- * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes
- * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
- * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+ * A cursor plane attached to a CRTC is more likely to be able to be
+ * enabled when no scaling/cropping is used and the framebuffer has the
+ * size indicated by &drm_mode_config.cursor_width and
+ * &drm_mode_config.cursor_height. Additionally, if the driver doesn't
+ * support modifiers, the framebuffer should have a linear layout.
*/
DRM_PLANE_TYPE_CURSOR,
};
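To satisfy the legacy IOCTLs mentioned above, a driver registers a primary plane and hands it to the CRTC; an illustrative sketch with placeholder my_* symbols:

	ret = drm_universal_plane_init(drm, &my->primary, BIT(0),
				       &my_plane_funcs, my_formats,
				       ARRAY_SIZE(my_formats), NULL,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret)
		return ret;

	ret = drm_crtc_init_with_planes(drm, &my->crtc, &my->primary,
					NULL /* no cursor plane */,
					&my_crtc_funcs, NULL);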
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index e6dbf3161c2f..ef9944e9c5fc 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -117,7 +117,7 @@ struct drm_simple_display_pipe_funcs {
* more details.
*
* Drivers which always have their buffers pinned should use
- * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
+ * drm_gem_simple_display_pipe_prepare_fb() for this hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
@@ -149,6 +149,33 @@ struct drm_simple_display_pipe_funcs {
* more details.
*/
void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @reset_plane:
+ *
+ * Optional, called by &drm_plane_funcs.reset. Please read the
+ * documentation for the &drm_plane_funcs.reset hook for more details.
+ */
+ void (*reset_plane)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @duplicate_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_duplicate_state
+ * hook for more details.
+ */
+ struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @destroy_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_destroy_state
+ * hook for more details.
+ */
+ void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
};
/**
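The new optional hooks are most useful together with the shadow-plane helpers; a simple-KMS driver could plug them all in via the macro from drm_gem_atomic_helper.h (my_* names are placeholders):

static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
	.enable		= my_pipe_enable,
	.disable	= my_pipe_disable,
	.update		= my_pipe_update,
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};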
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index dd125f8c766c..733a3e2d1d10 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -247,7 +247,6 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
-void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 975e8a67947f..1c815e0a14ed 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -206,6 +206,12 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
+enum drm_gpu_sched_stat {
+ DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+ DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_ENODEV,
+};
+
/**
* struct drm_sched_backend_ops
*
@@ -230,10 +236,16 @@ struct drm_sched_backend_ops {
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
/**
- * @timedout_job: Called when a job has taken too long to execute,
- * to trigger GPU recovery.
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
+ *
+ * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
+ * and the underlying driver has started or completed recovery.
+ *
+ * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
+ * available, i.e. has been unplugged.
*/
- void (*timedout_job)(struct drm_sched_job *sched_job);
+ enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
/**
* @free_job: Called once the job's finished fence has been signaled
@@ -285,7 +297,8 @@ struct drm_gpu_scheduler {
struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
+ atomic_t *score;
+ atomic_t _score;
bool ready;
bool free_guilty;
};
@@ -293,7 +306,7 @@ struct drm_gpu_scheduler {
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit, long timeout,
- const char *name);
+ atomic_t *score, const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
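With the new return type, a driver's timeout handler reports whether recovery is worthwhile; a hedged sketch with placeholder my_* helpers, plus the matching drm_sched_init() call, where passing NULL for score presumably falls back to the scheduler's embedded _score counter:

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *sched_job)
{
	if (my_gpu_is_unplugged())		/* placeholder check */
		return DRM_GPU_SCHED_STAT_ENODEV;

	my_gpu_reset();				/* placeholder recovery */
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

	ret = drm_sched_init(&my->sched, &my_sched_ops, hw_submission,
			     hang_limit, timeout, NULL /* score */, "my-ring");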
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index e17be324d95f..4fb523dfab32 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,9 +44,9 @@
#include "ttm_resource.h"
-struct ttm_bo_global;
+struct ttm_global;
-struct ttm_bo_device;
+struct ttm_device;
struct dma_buf_map;
@@ -88,7 +88,6 @@ struct ttm_tt;
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
- * @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is destroyed or put on the delayed delete list.
* @mem: structure describing current placement.
@@ -122,10 +121,9 @@ struct ttm_buffer_object {
* Members constant at init.
*/
- struct ttm_bo_device *bdev;
+ struct ttm_device *bdev;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
- size_t acc_size;
/**
* Members not needing protection.
@@ -313,7 +311,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
* @bulk: optional bulk move structure to remember BO positions
*
* Move this BO to the tail of all lru lists used to lookup and reserve an
- * object. This function must be called with struct ttm_bo_global::lru_lock
+ * object. This function must be called with struct ttm_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction.
*/
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
@@ -326,7 +324,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
* @bulk: bulk move structure
*
* Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_bo_global::lru_lock held.
+ * BO order never changes. Should be called with ttm_global::lru_lock held.
*/
void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
@@ -337,14 +335,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
* Returns
* True if the workqueue was queued at the time
*/
-int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
/**
* ttm_bo_unlock_delayed_workqueue
*
* Allows the delayed workqueue to run.
*/
-void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
+void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
/**
* ttm_bo_eviction_valuable
@@ -357,21 +355,16 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
-size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size);
-
/**
* ttm_bo_init_reserved
*
- * @bdev: Pointer to a ttm_bo_device struct.
+ * @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
- * @acc_size: Accounted size for this object.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -396,20 +389,19 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
+int ttm_bo_init_reserved(struct ttm_device *bdev,
struct ttm_buffer_object *bo,
size_t size, enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
struct ttm_operation_ctx *ctx,
- size_t acc_size, struct sg_table *sg,
- struct dma_resv *resv,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
* ttm_bo_init
*
- * @bdev: Pointer to a ttm_bo_device struct.
+ * @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
@@ -421,7 +413,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
- * @acc_size: Accounted size for this object.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -443,10 +434,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
+int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo,
size_t size, enum ttm_bo_type type,
struct ttm_placement *placement,
- uint32_t page_alignment, bool interrubtible, size_t acc_size,
+ uint32_t page_alignment, bool interrubtible,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
@@ -537,18 +528,18 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
*
* @filp: filp as input from the mmap method.
* @vma: vma as input from the mmap method.
- * @bdev: Pointer to the ttm_bo_device with the address space manager.
+ * @bdev: Pointer to the ttm_device with the address space manager.
*
* This function is intended to be called by the device mmap method.
* if the device address space is to be backed by the bo manager.
*/
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev);
+ struct ttm_device *bdev);
/**
* ttm_bo_io
*
- * @bdev: Pointer to the struct ttm_bo_device.
+ * @bdev: Pointer to the struct ttm_device.
* @filp: Pointer to the struct file attempting to read / write.
* @wbuf: User-space pointer to address of buffer to write. NULL on read.
* @rbuf: User-space pointer to address of buffer to read into.
@@ -565,11 +556,11 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write);
-int ttm_bo_swapout(struct ttm_operation_ctx *ctx);
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the
@@ -617,7 +608,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
--bo->pin_count;
}
-int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+int ttm_mem_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
@@ -642,5 +633,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
#endif
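With per-BO accounting gone, the acc_size argument drops out of the init paths; a hypothetical ttm_bo_init() call against the renamed ttm_device might now look like this (all my_*/gbo values are placeholders):

	ret = ttm_bo_init(&my->bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, page_align, false /* interruptible */,
			  NULL /* sg */, NULL /* resv */, my_bo_destroy);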
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 423348414c59..8959c0075cfd 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -37,302 +37,14 @@
#include <linux/spinlock.h>
#include <linux/dma-resv.h>
+#include <drm/ttm/ttm_device.h>
+
#include "ttm_bo_api.h"
-#include "ttm_memory.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"
/**
- * struct ttm_bo_driver
- *
- * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
- * @move: Callback for a driver to hook in accelerated functions to
- * move a buffer.
- * If set to NULL, a potentially slow memcpy() move is used.
- */
-
-struct ttm_bo_driver {
- /**
- * ttm_tt_create
- *
- * @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
- struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
- uint32_t page_flags);
-
- /**
- * ttm_tt_populate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Allocate all backing pages
- * Returns:
- * -ENOMEM: Out of memory.
- */
- int (*ttm_tt_populate)(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm,
- struct ttm_operation_ctx *ctx);
-
- /**
- * ttm_tt_unpopulate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Free all backing page
- */
- void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
- /**
- * ttm_tt_destroy
- *
- * @bdev: Pointer to a ttm device
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Destroy the backend. This will be call back from ttm_tt_destroy so
- * don't call ttm_tt_destroy from the callback or infinite loop.
- */
- void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
- /**
- * struct ttm_bo_driver member eviction_valuable
- *
- * @bo: the buffer object to be evicted
- * @place: placement we need room for
- *
- * Check with the driver if it is valuable to evict a BO to make room
- * for a certain placement.
- */
- bool (*eviction_valuable)(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
- /**
- * struct ttm_bo_driver member evict_flags:
- *
- * @bo: the buffer object to be evicted
- *
- * Return the bo flags for a buffer which is not mapped to the hardware.
- * These will be placed in proposed_flags so that when the move is
- * finished, they'll end up in bo->mem.flags
- * This should not cause multihop evictions, and the core will warn
- * if one is proposed.
- */
-
- void (*evict_flags)(struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
-
- /**
- * struct ttm_bo_driver member move:
- *
- * @bo: the buffer to move
- * @evict: whether this motion is evicting the buffer from
- * the graphics address space
- * @ctx: context for this move with parameters
- * @new_mem: the new memory region receiving the buffer
- @ @hop: placement for driver directed intermediate hop
- *
- * Move a buffer between two memory regions.
- * Returns errno -EMULTIHOP if driver requests a hop
- */
- int (*move)(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem,
- struct ttm_place *hop);
-
- /**
- * struct ttm_bo_driver_member verify_access
- *
- * @bo: Pointer to a buffer object.
- * @filp: Pointer to a struct file trying to access the object.
- *
- * Called from the map / write / read methods to verify that the
- * caller is permitted to access the buffer object.
- * This member may be set to NULL, which will refuse this kind of
- * access for all buffer objects.
- * This function should return 0 if access is granted, -EPERM otherwise.
- */
- int (*verify_access)(struct ttm_buffer_object *bo,
- struct file *filp);
-
- /**
- * Hook to notify driver about a resource delete.
- */
- void (*delete_mem_notify)(struct ttm_buffer_object *bo);
-
- /**
- * notify the driver that we're about to swap out this bo
- */
- void (*swap_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Driver callback on when mapping io memory (for bo_move_memcpy
- * for instance). TTM will take care to call io_mem_free whenever
- * the mapping is not use anymore. io_mem_reserve & io_mem_free
- * are balanced.
- */
- int (*io_mem_reserve)(struct ttm_bo_device *bdev,
- struct ttm_resource *mem);
- void (*io_mem_free)(struct ttm_bo_device *bdev,
- struct ttm_resource *mem);
-
- /**
- * Return the pfn for a given page_offset inside the BO.
- *
- * @bo: the BO to look up the pfn for
- * @page_offset: the offset to look up
- */
- unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
- unsigned long page_offset);
-
- /**
- * Read/write memory buffers for ptrace access
- *
- * @bo: the BO to access
- * @offset: the offset from the start of the BO
- * @buf: pointer to source/destination buffer
- * @len: number of bytes to copy
- * @write: whether to read (0) from or write (non-0) to BO
- *
- * If successful, this function should return the number of
- * bytes copied, -EIO otherwise. If the number of bytes
- * returned is < len, the function may be called again with
- * the remainder of the buffer to copy.
- */
- int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
- void *buf, int len, int write);
-
- /**
- * struct ttm_bo_driver member del_from_lru_notify
- *
- * @bo: the buffer object deleted from lru
- *
- * notify driver that a BO was deleted from LRU.
- */
- void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Notify the driver that we're about to release a BO
- *
- * @bo: BO that is about to be released
- *
- * Gives the driver a chance to do any cleanup, including
- * adding fences that may force a delayed delete
- */
- void (*release_notify)(struct ttm_buffer_object *bo);
-};
-
-/**
- * struct ttm_bo_global - Buffer object driver global data.
- *
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
- * @shrink: A shrink callback object used for buffer object swap.
- * @device_list_mutex: Mutex protecting the device list.
- * This mutex is held while traversing the device list for pm options.
- * @lru_lock: Spinlock protecting the bo subsystem lru lists.
- * @device_list: List of buffer object devices.
- * @swap_lru: Lru list of buffer objects used for swapping.
- */
-
-extern struct ttm_bo_global {
-
- /**
- * Constant after init.
- */
-
- struct kobject kobj;
- struct page *dummy_read_page;
- spinlock_t lru_lock;
-
- /**
- * Protected by ttm_global_mutex.
- */
- struct list_head device_list;
-
- /**
- * Protected by the lru_lock.
- */
- struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
-
- /**
- * Internal protection.
- */
- atomic_t bo_count;
-} ttm_bo_glob;
-
-
-#define TTM_NUM_MEM_TYPES 8
-
-/**
- * struct ttm_bo_device - Buffer object driver device-specific data.
- *
- * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @man: An array of resource_managers.
- * @vma_manager: Address space manager (pointer)
- * lru_lock: Spinlock that protects the buffer+device lru lists and
- * ddestroy lists.
- * @dev_mapping: A pointer to the struct address_space representing the
- * device address space.
- * @wq: Work queue structure for the delayed delete workqueue.
- *
- */
-
-struct ttm_bo_device {
-
- /*
- * Constant after bo device init / atomic.
- */
- struct list_head device_list;
- struct ttm_bo_driver *driver;
- /*
- * access via ttm_manager_type.
- */
- struct ttm_resource_manager sysman;
- struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
- /*
- * Protected by internal locks.
- */
- struct drm_vma_offset_manager *vma_manager;
- struct ttm_pool pool;
-
- /*
- * Protected by the global:lru lock.
- */
- struct list_head ddestroy;
-
- /*
- * Protected by load / firstopen / lastclose /unload sync.
- */
-
- struct address_space *dev_mapping;
-
- /*
- * Internal protection.
- */
-
- struct delayed_work wq;
-};
-
-static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
- int mem_type)
-{
- return bdev->man_drv[mem_type];
-}
-
-static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev,
- int type,
- struct ttm_resource_manager *manager)
-{
- bdev->man_drv[type] = manager;
-}
-
-/**
* struct ttm_lru_bulk_move_pos
*
* @first: first BO in the bulk move range
@@ -388,31 +100,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct ttm_operation_ctx *ctx);
-int ttm_bo_device_release(struct ttm_bo_device *bdev);
-
-/**
- * ttm_bo_device_init
- *
- * @bdev: A pointer to a struct ttm_bo_device to initialize.
- * @glob: A pointer to an initialized struct ttm_bo_global.
- * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
- * @dev: The core kernel device pointer for DMA mappings and allocations.
- * @mapping: The address space to use for this bo.
- * @vma_manager: A pointer to a vma manager.
- * @use_dma_alloc: If coherent DMA allocation API should be used.
- * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
- *
- * Initializes a struct ttm_bo_device:
- * Returns:
- * !0: Failure.
- */
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_driver *driver,
- struct device *dev,
- struct address_space *mapping,
- struct drm_vma_offset_manager *vma_manager,
- bool use_dma_alloc, bool use_dma32);
-
/**
* ttm_bo_unmap_virtual
*
@@ -494,9 +181,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
}
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
@@ -538,9 +225,9 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
/*
* ttm_bo_util.c
*/
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+int ttm_mem_io_reserve(struct ttm_device *bdev,
struct ttm_resource *mem);
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
+void ttm_mem_io_free(struct ttm_device *bdev,
struct ttm_resource *mem);
/**
@@ -631,7 +318,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
* Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
*/
-int ttm_range_man_init(struct ttm_bo_device *bdev,
+int ttm_range_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size);
@@ -643,7 +330,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
*
* Remove the generic range manager from a slot and tear it down.
*/
-int ttm_range_man_fini(struct ttm_bo_device *bdev,
+int ttm_range_man_fini(struct ttm_device *bdev,
unsigned type);
#endif
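The range manager helpers keep their shape, just against struct ttm_device; an illustrative VRAM setup (sizes and names are placeholders):

	ret = ttm_range_man_init(&my->bdev, TTM_PL_VRAM, false /* use_tt */,
				 vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/* on teardown: ttm_range_man_fini(&my->bdev, TTM_PL_VRAM); */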
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
new file mode 100644
index 000000000000..035bbc044a3b
--- /dev/null
+++ b/include/drm/ttm/ttm_device.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_DEVICE_H_
+#define _TTM_DEVICE_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_pool.h>
+
+#define TTM_NUM_MEM_TYPES 8
+
+struct ttm_device;
+struct ttm_placement;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+/**
+ * struct ttm_global - Buffer object driver global data.
+ *
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @shrink: A shrink callback object used for buffer object swap.
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+ * @device_list: List of buffer object devices.
+ * @swap_lru: Lru list of buffer objects used for swapping.
+ */
+extern struct ttm_global {
+
+ /**
+ * Constant after init.
+ */
+
+ struct page *dummy_read_page;
+ spinlock_t lru_lock;
+
+ /**
+ * Protected by ttm_global_mutex.
+ */
+ struct list_head device_list;
+
+ /**
+ * Protected by the lru_lock.
+ */
+ struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
+
+ /**
+ * Internal protection.
+ */
+ atomic_t bo_count;
+} ttm_glob;
+
+struct ttm_device_funcs {
+ /**
+ * ttm_tt_create
+ *
+ * @bo: The buffer object to create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Free all backing page
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_destroy
+ *
+ * @bdev: Pointer to a ttm device
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Destroy the backend. This will be call back from ttm_tt_destroy so
+ * don't call ttm_tt_destroy from the callback or infinite loop.
+ */
+ void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_bo_driver member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+ /**
+ * struct ttm_bo_driver member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ * This should not cause multihop evictions, and the core will warn
+ * if one is proposed.
+ */
+
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
+ /**
+ * struct ttm_bo_driver member move:
+ *
+ * @bo: the buffer to move
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ * @ctx: context for this move with parameters
+ * @new_mem: the new memory region receiving the buffer
+ @ @hop: placement for driver directed intermediate hop
+ *
+ * Move a buffer between two memory regions.
+ * Returns errno -EMULTIHOP if driver requests a hop
+ */
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop);
+
+ /**
+ * struct ttm_bo_driver_member verify_access
+ *
+ * @bo: Pointer to a buffer object.
+ * @filp: Pointer to a struct file trying to access the object.
+ *
+ * Called from the map / write / read methods to verify that the
+ * caller is permitted to access the buffer object.
+ * This member may be set to NULL, which will refuse this kind of
+ * access for all buffer objects.
+ * This function should return 0 if access is granted, -EPERM otherwise.
+ */
+ int (*verify_access)(struct ttm_buffer_object *bo,
+ struct file *filp);
+
+ /**
+ * Hook to notify driver about a resource delete.
+ */
+ void (*delete_mem_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * notify the driver that we're about to swap out this bo
+ */
+ void (*swap_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback on when mapping io memory (for bo_move_memcpy
+ * for instance). TTM will take care to call io_mem_free whenever
+ * the mapping is not use anymore. io_mem_reserve & io_mem_free
+ * are balanced.
+ */
+ int (*io_mem_reserve)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+ void (*io_mem_free)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
+ /**
+ * Read/write memory buffers for ptrace access
+ *
+ * @bo: the BO to access
+ * @offset: the offset from the start of the BO
+ * @buf: pointer to source/destination buffer
+ * @len: number of bytes to copy
+ * @write: whether to read (0) from or write (non-0) to BO
+ *
+ * If successful, this function should return the number of
+ * bytes copied, -EIO otherwise. If the number of bytes
+ * returned is < len, the function may be called again with
+ * the remainder of the buffer to copy.
+ */
+ int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
+
+ /**
+ * struct ttm_bo_driver member del_from_lru_notify
+ *
+ * @bo: the buffer object deleted from lru
+ *
+ * notify driver that a BO was deleted from LRU.
+ */
+ void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Notify the driver that we're about to release a BO
+ *
+ * @bo: BO that is about to be released
+ *
+ * Gives the driver a chance to do any cleanup, including
+ * adding fences that may force a delayed delete
+ */
+ void (*release_notify)(struct ttm_buffer_object *bo);
+};
+
+/**
+ * struct ttm_device - Buffer object driver device-specific data.
+ *
+ * @device_list: Our entry in the global device list.
+ * @funcs: Function table for the device.
+ * @sysman: Resource manager for the system domain.
+ * @man_drv: An array of resource_managers.
+ * @vma_manager: Address space manager.
+ * @pool: page pool for the device.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ */
+struct ttm_device {
+ /*
+ * Constant after bo device init
+ */
+ struct list_head device_list;
+ struct ttm_device_funcs *funcs;
+
+ /*
+ * Access via ttm_manager_type.
+ */
+ struct ttm_resource_manager sysman;
+ struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
+
+ /*
+ * Protected by internal locks.
+ */
+ struct drm_vma_offset_manager *vma_manager;
+ struct ttm_pool pool;
+
+ /*
+ * Protected by the global:lru lock.
+ */
+ struct list_head ddestroy;
+
+ /*
+ * Protected by load / firstopen / lastclose /unload sync.
+ */
+ struct address_space *dev_mapping;
+
+ /*
+ * Internal protection.
+ */
+ struct delayed_work wq;
+};
+
+static inline struct ttm_resource_manager *
+ttm_manager_type(struct ttm_device *bdev, int mem_type)
+{
+ return bdev->man_drv[mem_type];
+}
+
+static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
+ struct ttm_resource_manager *manager)
+{
+ bdev->man_drv[type] = manager;
+}
+
+int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
+ struct device *dev, struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
+ bool use_dma_alloc, bool use_dma32);
+void ttm_device_fini(struct ttm_device *bdev);
+
+#endif
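A driver adopting the new device structure fills in struct ttm_device_funcs and calls ttm_device_init(); a hedged sketch, with my_* callbacks as placeholders and the mapping/vma manager taken from a placeholder drm_device:

static struct ttm_device_funcs my_ttm_funcs = {
	.ttm_tt_create		= my_ttm_tt_create,
	.ttm_tt_populate	= my_ttm_tt_populate,
	.ttm_tt_destroy		= my_ttm_tt_destroy,
	.eviction_valuable	= ttm_bo_eviction_valuable,
	.evict_flags		= my_evict_flags,
	.move			= my_bo_move,
};

	ret = ttm_device_init(&my->bdev, &my_ttm_funcs, drm->dev,
			      drm->anon_inode->i_mapping,
			      drm->vma_offset_manager,
			      false /* use_dma_alloc */, false /* use_dma32 */);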
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
deleted file mode 100644
index c1f167881e33..000000000000
--- a/include/drm/ttm/ttm_memory.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TTM_MEMORY_H
-#define TTM_MEMORY_H
-
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/bug.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-#include "ttm_bo_api.h"
-
-/**
- * struct ttm_mem_global - Global memory accounting structure.
- *
- * @shrink: A single callback to shrink TTM memory usage. Extend this
- * to a linked list to be able to handle multiple callbacks when needed.
- * @swap_queue: A workqueue to handle shrinking in low memory situations. We
- * need a separate workqueue since it will spend a lot of time waiting
- * for the GPU, and this will otherwise block other workqueue tasks(?)
- * At this point we use only a single-threaded workqueue.
- * @work: The workqueue callback for the shrink queue.
- * @lock: Lock to protect the @shrink - and the memory accounting members,
- * that is, essentially the whole structure with some exceptions.
- * @lower_mem_limit: include lower limit of swap space and lower limit of
- * system memory.
- * @zones: Array of pointers to accounting zones.
- * @num_zones: Number of populated entries in the @zones array.
- * @zone_kernel: Pointer to the kernel zone.
- * @zone_highmem: Pointer to the highmem zone if there is one.
- * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-#define TTM_MEM_MAX_ZONES 2
-struct ttm_mem_zone;
-extern struct ttm_mem_global {
- struct kobject kobj;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
- spinlock_t lock;
- uint64_t lower_mem_limit;
- struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
- unsigned int num_zones;
- struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
- struct ttm_mem_zone *zone_highmem;
-#else
- struct ttm_mem_zone *zone_dma32;
-#endif
-} ttm_mem_glob;
-
-int ttm_mem_global_init(struct ttm_mem_global *glob);
-void ttm_mem_global_release(struct ttm_mem_global *glob);
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount);
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size);
-size_t ttm_round_pot(size_t size);
-bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages,
- struct ttm_operation_ctx *ctx);
-#endif
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index da0ed7e8c915..6164ccf4f308 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -33,7 +33,7 @@
#define TTM_MAX_BO_PRIORITY 4U
-struct ttm_bo_device;
+struct ttm_device;
struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
@@ -233,7 +233,7 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
-int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 6c8eb9a4de81..069f8130241a 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <drm/ttm/ttm_caching.h>
+struct ttm_bo_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
@@ -118,14 +119,14 @@ void ttm_tt_fini(struct ttm_tt *ttm);
*
* Unbind, unpopulate and destroy common struct ttm_tt.
*/
-void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_destroy_common:
*
* Called from driver to destroy common path.
*/
-void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
@@ -135,7 +136,8 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
* Swap in a previously swap out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
-int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
+ gfp_t gfp_flags);
/**
* ttm_tt_populate - allocate pages for a ttm
@@ -144,7 +146,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
*
* Calls the driver method to allocate pages for a ttm
*/
-int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate - free pages from a ttm
@@ -153,7 +155,10 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_o
*
* Calls the driver method to free all pages from a ttm
*/
-void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+int ttm_tt_mgr_init(void);
+void ttm_tt_mgr_fini(void);
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>