Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Been a bit busy, first week of kids school, and waiting on other trees
to go in before I could send this, so its a bit later than I'd
normally like.
Highlights:
- core:
timestamp fixes, lots of misc cleanups
- new drivers:
bochs virtual vga
- vmwgfx:
major overhaul for their nextgen virt gpu.
- i915:
runtime D3 on HSW, watermark fixes, power well work, fbc fixes,
bdw is no longer prelim.
- nouveau:
gk110/208 acceleration, more pm groundwork, old overlay support
- radeon:
dpm rework and clockgating for CIK, pci config reset, big endian
fixes
- tegra:
panel support and DSI support, build as module, prime.
- armada, omap, gma500, rcar, exynos, mgag200, cirrus, ast:
fixes
- msm:
hdmi support for mdp5"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (595 commits)
drm/nouveau: resume display if any later suspend bits fail
drm/nouveau: fix lock unbalance in nouveau_crtc_page_flip
drm/nouveau: implement hooks needed for drm vblank timestamping support
drm/nouveau/disp: add a method to fetch info needed by drm vblank timestamping
drm/nv50: fill in crtc mode struct members from crtc_mode_fixup
drm/radeon/dce8: workaround for atom BlankCrtc table
drm/radeon/DCE4+: clear bios scratch dpms bit (v2)
drm/radeon: set si_notify_smc_display_change properly
drm/radeon: fix DAC interrupt handling on DCE5+
drm/radeon: clean up active vram sizing
drm/radeon: skip async dma init on r6xx
drm/radeon/runpm: don't runtime suspend non-PX cards
drm/radeon: add ring to fence trace functions
drm/radeon: add missing trace point
drm/radeon: fix VMID use tracking
drm: ast,cirrus,mgag200: use drm_can_sleep
drm/gma500: Lock struct_mutex around cursor updates
drm/i915: Fix the offset issue for the stolen GEM objects
DRM: armada: fix missing DRM_KMS_FB_HELPER select
drm/i915: Decouple GPU error reporting from ring initialisation
...
include/uapi/drm/drm.h:
@@ -181,7 +181,6 @@ enum drm_map_type {
	_DRM_AGP = 3,		/**< AGP/GART */
	_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5,	/**< Consistent memory for PCI DMA */
	_DRM_GEM = 6,		/**< GEM object (obsolete) */
};

/**
include/uapi/drm/i915_drm.h:
@@ -222,6 +222,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32

#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -271,6 +272,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
@@ -719,7 +721,7 @@ struct drm_i915_gem_execbuffer2 {
 */
#define I915_EXEC_IS_PINNED (1<<10)

-/** Provide a hint to the kernel that the command stream and auxilliary
+/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already holds the correct presumed addresses and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
@@ -1030,4 +1032,21 @@ struct drm_i915_reg_read {
	__u64 offset;
	__u64 val; /* Return value */
};

struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;

	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;

	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;

	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;

	__u32 pad;
};

#endif /* _UAPI_I915_DRM_H_ */
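
The reset-stats query added above is an ordinary DRM ioctl, so user space can call it directly. A minimal sketch, assuming an already-open DRM file descriptor and libdrm's copy of i915_drm.h on the include path; the query_reset_stats() helper name is made up here:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <i915_drm.h>	/* libdrm copy of the header shown above */

/* Query reset statistics for one context; ctx_id 0 is the default context. */
static int query_reset_stats(int drm_fd, __u32 ctx_id)
{
	struct drm_i915_reset_stats stats;

	memset(&stats, 0, sizeof(stats));
	stats.ctx_id = ctx_id;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) != 0)
		return -errno;	/* errno holds the failure reason */

	printf("resets=%u batch_active=%u batch_pending=%u\n",
	       stats.reset_count, stats.batch_active, stats.batch_pending);
	return 0;
}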
include/uapi/drm/radeon_drm.h:
@@ -985,6 +985,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
/* query the number of render backends */
#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
/* max engine clock - needed for OpenCL */
#define RADEON_INFO_MAX_SCLK 0x1a


struct drm_radeon_info {
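
RADEON_INFO_MAX_SCLK is read through the existing DRM_IOCTL_RADEON_INFO interface, where the value field carries a user-space pointer the kernel writes through. A minimal sketch; the libdrm include path and the query_max_sclk() helper name are assumptions, and the unit of the returned clock is driver-defined:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <radeon_drm.h>	/* libdrm copy of the header shown above */

/* Ask the kernel for the maximum engine clock advertised for OpenCL. */
static int query_max_sclk(int drm_fd, uint32_t *max_sclk)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_MAX_SCLK;
	info.value = (uint64_t)(uintptr_t)max_sclk;	/* kernel writes the result here */

	if (ioctl(drm_fd, DRM_IOCTL_RADEON_INFO, &info) != 0)
		return -errno;
	return 0;
}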
include/uapi/drm/vmwgfx_drm.h:
@@ -28,6 +28,10 @@
#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#ifndef __KERNEL__
#include <drm.h>
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24

@@ -55,6 +59,11 @@
#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25

/*************************************************************************/
/**
@@ -76,6 +85,8 @@
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9

/**
 * struct drm_vmw_getparam_arg
@@ -788,4 +799,253 @@ struct drm_vmw_update_layout_arg {
	uint64_t rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
	drm_vmw_shader_type_gs
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	uint32_t size;
	uint32_t buffer_handle;
	uint32_t shader_handle;
	uint64_t offset;
};

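The shader ioctls are driver-private DRM commands, so user space normally reaches them through libdrm's drmCommandWriteRead(), which builds the ioctl number from the DRM_VMW_CREATE_SHADER offset above. A minimal sketch; the vmw_create_shader() helper name is made up, and the byte-code is assumed to already live in a dma buffer identified by buffer_handle:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead() */
#include <vmwgfx_drm.h>

static int vmw_create_shader(int drm_fd, uint32_t buffer_handle,
			     uint64_t offset, uint32_t size,
			     uint32_t *shader_handle)
{
	struct drm_vmw_shader_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.shader_type = drm_vmw_shader_type_vs;	/* vertex shader */
	arg.size = size;		/* byte-code size in bytes */
	arg.buffer_handle = buffer_handle;
	arg.offset = offset;		/* where the byte-code starts in the buffer */

	ret = drmCommandWriteRead(drm_fd, DRM_VMW_CREATE_SHADER,
				  &arg, sizeof(arg));
	if (ret)
		return ret;

	*shader_handle = arg.shader_handle;	/* identifies the shader from now on */
	return 0;
}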
/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	uint32_t handle;
	uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_level: Number of mip levels for all faces.
 * @drm_surface_flags Flags as described above.
 * @multisample_count Future use. Set to 0.
 * @autogen_filter Future use. Set to 0.
 * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size Size of the base mip level for all faces.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	uint32_t svga3d_flags;
	uint32_t format;
	uint32_t mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	uint32_t multisample_count;
	uint32_t autogen_filter;
	uint32_t buffer_handle;
	uint32_t pad64;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	uint32_t handle;
	uint32_t backup_size;
	uint32_t buffer_handle;
	uint32_t buffer_size;
	uint64_t buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};

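Creating a guest-backed surface follows the same pattern: fill the req side of the union, call the ioctl, and read the rep side back. A minimal sketch; vmw_gb_surface_create() is a made-up helper, and real code would pass SVGA3D_* flag and format values from the device headers, which are not part of this file:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <vmwgfx_drm.h>

static int vmw_gb_surface_create(int drm_fd, uint32_t svga3d_flags,
				 uint32_t format, struct drm_vmw_size base_size,
				 struct drm_vmw_gb_surface_create_rep *rep)
{
	union drm_vmw_gb_surface_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.svga3d_flags = svga3d_flags;	/* SVGA3d surface flags */
	arg.req.format = format;		/* SVGA3d format value */
	arg.req.mip_levels = 1;
	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
	/* SVGA3D_INVALID_ID comes from the device headers; assumed to be ~0 here. */
	arg.req.buffer_handle = (uint32_t)-1;
	arg.req.base_size = base_size;

	ret = drmCommandWriteRead(drm_fd, DRM_VMW_GB_SURFACE_CREATE,
				  &arg, sizeof(arg));
	if (ret)
		return ret;

	*rep = arg.rep;		/* surface handle, backup buffer handle/size, map offset */
	return 0;
}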
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_reference_arg
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};

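Referencing an existing surface, for instance one shared by another client, uses the same call pattern. A hedged sketch; vmw_gb_surface_ref() is a made-up helper, and it assumes the pre-existing struct drm_vmw_surface_arg earlier in this header carries the surface handle in its sid member:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <vmwgfx_drm.h>

static int vmw_gb_surface_ref(int drm_fd, uint32_t handle,
			      struct drm_vmw_gb_surface_ref_rep *rep)
{
	union drm_vmw_gb_surface_reference_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.sid = (int32_t)handle;	/* surface handle from GB_SURFACE_CREATE */

	ret = drmCommandWriteRead(drm_fd, DRM_VMW_GB_SURFACE_REF,
				  &arg, sizeof(arg));
	if (ret)
		return ret;

	*rep = arg.rep;		/* creq + crep, as documented above */
	return 0;
}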
/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that references the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Dont wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	uint32_t handle;
	uint32_t pad64;
};

#endif
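
Typical use of the synccpu ioctl is to bracket direct CPU access to a mapped buffer with a grab and a release. A minimal sketch; vmw_synccpu() is a made-up helper, and mapping the buffer itself is out of scope here:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWrite() */
#include <vmwgfx_drm.h>

static int vmw_synccpu(int drm_fd, uint32_t handle, enum drm_vmw_synccpu_op op)
{
	struct drm_vmw_synccpu_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.op = op;					/* grab or release */
	arg.handle = handle;
	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;

	return drmCommandWrite(drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

/*
 * Typical sequence:
 *   vmw_synccpu(fd, handle, drm_vmw_synccpu_grab);
 *   ... touch the mapped buffer with the CPU ...
 *   vmw_synccpu(fd, handle, drm_vmw_synccpu_release);
 */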