diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 2e8dfd1a66b6..f92a32d095d9 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -164,6 +164,12 @@ DMA Fence Signalling Annotations .. kernel-doc:: drivers/dma-buf/dma-fence.c :doc: fence signalling annotation +DMA Fence Deadline Hints +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/dma-buf/dma-fence.c + :doc: deadline hints + DMA Fences Functions Reference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -197,8 +203,8 @@ DMA Fence unwrap .. kernel-doc:: include/linux/dma-fence-unwrap.h :internal: -DMA Fence uABI/Sync File -~~~~~~~~~~~~~~~~~~~~~~~~ +DMA Fence Sync File +~~~~~~~~~~~~~~~~~~~ .. kernel-doc:: drivers/dma-buf/sync_file.c :export: @@ -206,6 +212,12 @@ DMA Fence uABI/Sync File .. kernel-doc:: include/linux/sync_file.h :internal: +DMA Fence Sync File uABI +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: include/uapi/linux/sync_file.h + :internal: + Indefinite DMA Fences ~~~~~~~~~~~~~~~~~~~~~ diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 5c8a7084577b..9b3ce8948351 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -123,12 +123,23 @@ static void dma_fence_array_release(struct dma_fence *fence) dma_fence_free(fence); } +static void dma_fence_array_set_deadline(struct dma_fence *fence, + ktime_t deadline) +{ + struct dma_fence_array *array = to_dma_fence_array(fence); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) + dma_fence_set_deadline(array->fences[i], deadline); +} + const struct dma_fence_ops dma_fence_array_ops = { .get_driver_name = dma_fence_array_get_driver_name, .get_timeline_name = dma_fence_array_get_timeline_name, .enable_signaling = dma_fence_array_enable_signaling, .signaled = dma_fence_array_signaled, .release = dma_fence_array_release, + .set_deadline = dma_fence_array_set_deadline, }; EXPORT_SYMBOL(dma_fence_array_ops); diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index a0d920576ba6..9663ba1bb6ac 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -206,6 +206,17 @@ static void dma_fence_chain_release(struct dma_fence *fence) dma_fence_free(fence); } + +static void dma_fence_chain_set_deadline(struct dma_fence *fence, + ktime_t deadline) +{ + dma_fence_chain_for_each(fence, fence) { + struct dma_fence *f = dma_fence_chain_contained(fence); + + dma_fence_set_deadline(f, deadline); + } +} + const struct dma_fence_ops dma_fence_chain_ops = { .use_64bit_seqno = true, .get_driver_name = dma_fence_chain_get_driver_name, @@ -213,6 +224,7 @@ const struct dma_fence_ops dma_fence_chain_ops = { .enable_signaling = dma_fence_chain_enable_signaling, .signaled = dma_fence_chain_signaled, .release = dma_fence_chain_release, + .set_deadline = dma_fence_chain_set_deadline, }; EXPORT_SYMBOL(dma_fence_chain_ops); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 0de0482cd36e..f177c56269bb 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -912,6 +912,65 @@ err_free_cb: } EXPORT_SYMBOL(dma_fence_wait_any_timeout); +/** + * DOC: deadline hints + * + * In an ideal world, it would be possible to pipeline a workload sufficiently + * that a utilization based device frequency governor could arrive at a minimum + * frequency that meets the requirements of the use-case, in order to minimize + * power consumption. 
But in the real world there are many workloads which + * defy this ideal. Examples include, but are not limited to: + * + * * Workloads that ping-pong between device and CPU, with alternating periods + * of CPU waiting for device, and device waiting on CPU. This can result in + * devfreq and cpufreq seeing idle time in their respective domains and, as a + * result, reducing frequency. + * + * * Workloads that interact with a periodic time-based deadline, such as double + * buffered GPU rendering vs vblank sync'd page flipping. In this scenario, + * missing a vblank deadline results in an *increase* in idle time on the GPU + * (since it has to wait an additional vblank period), sending a signal to + * the GPU's devfreq to reduce frequency, when in fact the opposite is what is + * needed. + * + * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline. + * The deadline hint provides a way for the waiting driver, or userspace, to + * convey an appropriate sense of urgency to the signaling driver. + * + * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for + * userspace-facing APIs). The time could either be some point in the future (such as + * the vblank-based deadline for page-flipping, or the start of a compositor's + * composition cycle), or the current time to indicate an immediate deadline + * hint (i.e. forward progress cannot be made until this fence is signaled). + * + * Multiple deadlines may be set on a given fence, even in parallel. See the + * documentation for &dma_fence_ops.set_deadline. + * + * The deadline hint is just that, a hint. The driver that created the fence + * may react by increasing frequency, making different scheduling choices, etc., + * or it may do nothing at all. + */ + +/** + * dma_fence_set_deadline - set desired fence-wait deadline hint + * @fence: the fence that is to be waited on + * @deadline: the time by which the waiter hopes for the fence to be + * signaled + * + * Give the fence signaler a hint about an upcoming deadline, such as + * vblank, by which point the waiter would prefer the fence to be + * signaled. This is intended to give feedback to the fence signaler + * to aid in power management decisions, such as boosting GPU frequency + * if a periodic vblank deadline is approaching but the fence is not + * yet signaled. + */ +void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) +{ + if (fence->ops->set_deadline && !dma_fence_is_signaled(fence)) + fence->ops->set_deadline(fence, deadline); +} +EXPORT_SYMBOL(dma_fence_set_deadline); + /** * dma_fence_describe - Dump fence describtion into seq_file * @fence: the fence to describe diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 1c76aed8e262..2a594b754af1 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -684,6 +684,28 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, } EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); +/** + * dma_resv_set_deadline - Set a deadline on a reservation object's fences + * @obj: the reservation object + * @usage: controls which fences to include, see enum dma_resv_usage. + * @deadline: the requested deadline (MONOTONIC) + * + * May be called without holding the dma_resv lock. Sets @deadline on + * all fences filtered by @usage.
+ */ +void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage, + ktime_t deadline) +{ + struct dma_resv_iter cursor; + struct dma_fence *fence; + + dma_resv_iter_begin(&cursor, obj, usage); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + dma_fence_set_deadline(fence, deadline); + } + dma_resv_iter_end(&cursor); +} +EXPORT_SYMBOL_GPL(dma_resv_set_deadline); /** * dma_resv_test_signaled - Test if a reservation object's fences have been diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 8606876f7233..d4d2a2ce40f8 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1511,6 +1511,41 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, } EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); +/* + * For atomic updates which touch just a single CRTC, calculate the time of the + * next vblank, and inform all the fences of the deadline. + */ +static void set_fence_deadline(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; + struct drm_plane *plane; + struct drm_plane_state *new_plane_state; + ktime_t vbltime = 0; + int i; + + for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { + ktime_t v; + + if (drm_crtc_next_vblank_start(crtc, &v)) + continue; + + if (!vbltime || ktime_before(v, vbltime)) + vbltime = v; + } + + /* If no CRTCs updated, then nothing to do: */ + if (!vbltime) + return; + + for_each_new_plane_in_state (state, plane, new_plane_state, i) { + if (!new_plane_state->fence) + continue; + dma_fence_set_deadline(new_plane_state->fence, vbltime); + } +} + /** * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state * @dev: DRM device @@ -1540,6 +1575,8 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_plane_state *new_plane_state; int i, ret; + set_fence_deadline(dev, state); + for_each_new_plane_in_state(state, plane, new_plane_state, i) { if (!new_plane_state->fence) continue; diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 2ff31717a3de..299fa2a19a90 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -844,10 +844,9 @@ bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc, EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp); /** - * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent - * vblank interval - * @dev: DRM device - * @pipe: index of CRTC whose vblank timestamp to retrieve + * drm_crtc_get_last_vbltimestamp - retrieve raw timestamp for the most + * recent vblank interval + * @crtc: CRTC whose vblank timestamp to retrieve * @tvblank: Pointer to target time which should receive the timestamp * @in_vblank_irq: * True when called from drm_crtc_handle_vblank(). Some drivers @@ -865,10 +864,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp); * True if timestamp is considered to be very precise, false otherwise. */ static bool -drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, - ktime_t *tvblank, bool in_vblank_irq) +drm_crtc_get_last_vbltimestamp(struct drm_crtc *crtc, ktime_t *tvblank, + bool in_vblank_irq) { - struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); bool ret = false; /* Define requested maximum error on timestamps (nanoseconds). 
*/ @@ -876,8 +874,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, /* Query driver if possible and precision timestamping enabled. */ if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) { - struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); - ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error, tvblank, in_vblank_irq); } @@ -891,6 +887,15 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, return ret; } +static bool +drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, + ktime_t *tvblank, bool in_vblank_irq) +{ + struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); + + return drm_crtc_get_last_vbltimestamp(crtc, tvblank, in_vblank_irq); +} + /** * drm_crtc_vblank_count - retrieve "cooked" vblank counter value * @crtc: which counter to retrieve @@ -980,6 +985,36 @@ u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, } EXPORT_SYMBOL(drm_crtc_vblank_count_and_time); +/** + * drm_crtc_next_vblank_start - calculate the time of the next vblank + * @crtc: the CRTC for which to calculate the next vblank time + * @vblanktime: pointer to time to receive the next vblank timestamp. + * + * Calculate the expected time of the start of the next vblank period, + * based on the time of the previous vblank and the frame duration. + */ +int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime) +{ + unsigned int pipe = drm_crtc_index(crtc); + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[pipe]; + struct drm_display_mode *mode = &vblank->hwmode; + u64 vblank_start; + + if (!vblank->framedur_ns || !vblank->linedur_ns) + return -EINVAL; + + if (!drm_crtc_get_last_vbltimestamp(crtc, vblanktime, false)) + return -EINVAL; + + vblank_start = DIV_ROUND_DOWN_ULL( + (u64)vblank->framedur_ns * mode->crtc_vblank_start, + mode->crtc_vtotal); + *vblanktime = ktime_add(*vblanktime, ns_to_ktime(vblank_start)); + + return 0; +} +EXPORT_SYMBOL(drm_crtc_next_vblank_start); + static void send_vblank_event(struct drm_device *dev, struct drm_pending_vblank_event *e, u64 seq, ktime_t now) diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 7fd869520ef2..fe9c6468e440 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -123,6 +123,37 @@ static void drm_sched_fence_release_finished(struct dma_fence *f) dma_fence_put(&fence->scheduled); } +static void drm_sched_fence_set_deadline_finished(struct dma_fence *f, + ktime_t deadline) +{ + struct drm_sched_fence *fence = to_drm_sched_fence(f); + struct dma_fence *parent; + unsigned long flags; + + spin_lock_irqsave(&fence->lock, flags); + + /* If we already have an earlier deadline, keep it: */ + if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) && + ktime_before(fence->deadline, deadline)) { + spin_unlock_irqrestore(&fence->lock, flags); + return; + } + + fence->deadline = deadline; + set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags); + + spin_unlock_irqrestore(&fence->lock, flags); + + /* + * smp_load_acquire() to ensure that if we are racing another + * thread calling drm_sched_fence_set_parent(), we see + * the parent set before it calls test_bit(HAS_DEADLINE_BIT). + */ + parent = smp_load_acquire(&fence->parent); + if (parent) + dma_fence_set_deadline(parent, deadline); +} + static const struct dma_fence_ops drm_sched_fence_ops_scheduled = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, @@ -133,6 +164,7 @@
static const struct dma_fence_ops drm_sched_fence_ops_finished = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, .release = drm_sched_fence_release_finished, + .set_deadline = drm_sched_fence_set_deadline_finished, }; struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) @@ -147,6 +179,20 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) } EXPORT_SYMBOL(to_drm_sched_fence); +void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence, + struct dma_fence *fence) +{ + /* + * smp_store_release() to ensure another thread racing us + * in drm_sched_fence_set_deadline_finished() sees the + * fence's parent set before test_bit(). + */ + smp_store_release(&s_fence->parent, dma_fence_get(fence)); + if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, + &s_fence->finished.flags)) + dma_fence_set_deadline(fence, s_fence->deadline); +} + struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity, void *owner) { diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 214364fccb71..250c46cd9c34 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1048,7 +1048,7 @@ static int drm_sched_main(void *param) drm_sched_fence_scheduled(s_fence); if (!IS_ERR_OR_NULL(fence)) { - s_fence->parent = dma_fence_get(fence); + drm_sched_fence_set_parent(s_fence, fence); /* Drop for original kref_init of the fence */ dma_fence_put(fence); diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index 733a3e2d1d10..7f3957943dd1 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -230,6 +230,7 @@ bool drm_dev_has_vblank(const struct drm_device *dev); u64 drm_crtc_vblank_count(struct drm_crtc *crtc); u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, ktime_t *vblanktime); +int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime); void drm_crtc_send_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e); void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 898608f87b96..c0ec6719282a 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -41,6 +41,15 @@ */ #define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS +/** + * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set + * + * Because a deadline hint can be set before the backing hw + * fence is created, we need to keep track of whether a deadline has already + * been set. + */ +#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT (DMA_FENCE_FLAG_USER_BITS + 1) + enum dma_resv_usage; struct dma_resv; struct drm_gem_object; @@ -282,6 +291,12 @@ struct drm_sched_fence { */ struct dma_fence finished; + /** + * @deadline: deadline set on &drm_sched_fence.finished which + * potentially needs to be propagated to &drm_sched_fence.parent + */ + ktime_t deadline; + /** * @parent: the fence returned by &drm_sched_backend_ops.run_job + * when scheduling the job on hardware.
We signal the @@ -574,6 +589,8 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority); bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); +void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence, + struct dma_fence *fence); struct drm_sched_fence *drm_sched_fence_alloc( struct drm_sched_entity *s_entity, void *owner); void drm_sched_fence_init(struct drm_sched_fence *fence, diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 775cdc0b4f24..d54b595a0fe0 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -257,6 +257,26 @@ struct dma_fence_ops { */ void (*timeline_value_str)(struct dma_fence *fence, char *str, int size); + + /** + * @set_deadline: + * + * Callback to allow a fence waiter to inform the fence signaler of + * an upcoming deadline, such as vblank, by which point the waiter + * would prefer the fence to be signaled. This is intended to + * give feedback to the fence signaler to aid in power management + * decisions, such as boosting GPU frequency. + * + * This is called without &dma_fence.lock held; it can be called + * multiple times and from any context. Locking is up to the callee + * if it has some state to manage. If multiple deadlines are set, + * the expectation is to track the soonest one. If the deadline is + * before the current time, it should be interpreted as an immediate + * deadline. + * + * This callback is optional. + */ + void (*set_deadline)(struct dma_fence *fence, ktime_t deadline); }; void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, @@ -583,6 +603,8 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) return ret < 0 ? ret : 0; } +void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline); + struct dma_fence *dma_fence_get_stub(void); struct dma_fence *dma_fence_allocate_private_stub(void); u64 dma_fence_context_alloc(unsigned num); diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 0637659a702c..8d0e34dad446 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -479,6 +479,8 @@ int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, bool intr, unsigned long timeout); +void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage, + ktime_t deadline); bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage); void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq); diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h index ee2dcfb3d660..7e42a5b7558b 100644 --- a/include/uapi/linux/sync_file.h +++ b/include/uapi/linux/sync_file.h @@ -16,12 +16,16 @@ #include <linux/types.h> /** - * struct sync_merge_data - data passed to merge ioctl + * struct sync_merge_data - SYNC_IOC_MERGE: merge two fences * @name: name of new fence * @fd2: file descriptor of second fence * @fence: returns the fd of the new fence to userspace * @flags: merge_data flags * @pad: padding for 64-bit alignment, should always be zero + * + * Creates a new fence containing copies of the sync_pts in both + * the calling fd and sync_merge_data.fd2.
Returns the new fence's + * fd in sync_merge_data.fence */ struct sync_merge_data { char name[32]; @@ -34,8 +38,8 @@ struct sync_merge_data { /** * struct sync_fence_info - detailed fence information * @obj_name: name of parent sync_timeline -* @driver_name: name of driver implementing the parent -* @status: status of the fence 0:active 1:signaled <0:error + * @driver_name: name of driver implementing the parent + * @status: status of the fence 0:active 1:signaled <0:error * @flags: fence_info flags * @timestamp_ns: timestamp of status change in nanoseconds */ @@ -48,14 +52,19 @@ struct sync_fence_info { }; /** - * struct sync_file_info - data returned from fence info ioctl + * struct sync_file_info - SYNC_IOC_FILE_INFO: get detailed information on a sync_file * @name: name of fence * @status: status of fence. 1: signaled 0:active <0:error * @flags: sync_file_info flags * @num_fences number of fences in the sync_file * @pad: padding for 64-bit alignment, should always be zero - * @sync_fence_info: pointer to array of structs sync_fence_info with all + * @sync_fence_info: pointer to array of struct &sync_fence_info with all * fences in the sync_file + * + * Takes a struct sync_file_info. If num_fences is 0, the field is updated + * with the actual number of fences. If num_fences is > 0, the system will + * use the pointer provided on sync_fence_info to return up to num_fences of + * struct sync_fence_info, with detailed fence information. */ struct sync_file_info { char name[32]; @@ -69,30 +78,14 @@ struct sync_file_info { #define SYNC_IOC_MAGIC '>' -/** +/* * Opcodes 0, 1 and 2 were burned during a API change to avoid users of the * old API to get weird errors when trying to handling sync_files. The API * change happened during the de-stage of the Sync Framework when there was * no upstream users available. */ -/** - * DOC: SYNC_IOC_MERGE - merge two fences - * - * Takes a struct sync_merge_data. Creates a new fence containing copies of - * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the - * new fence's fd in sync_merge_data.fence - */ #define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data) - -/** - * DOC: SYNC_IOC_FILE_INFO - get detailed information on a sync_file - * - * Takes a struct sync_file_info. If num_fences is 0, the field is updated - * with the actual number of fences. If num_fences is > 0, the system will - * use the pointer provided on sync_fence_info to return up to num_fences of - * struct sync_fence_info, with detailed fence information. - */ #define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info) #endif /* _UAPI_LINUX_SYNC_H */
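As an illustration of the waiter-side interface added above, the following is a minimal sketch, not part of the patch, of how a display-driver commit path might hint a fence it is about to wait on with the time of the next vblank. The helper name hypothetical_hint_scanout_fence() is an assumption for illustration; drm_crtc_next_vblank_start() and dma_fence_set_deadline() are introduced by this series, while ktime_get() is the existing CLOCK_MONOTONIC accessor.

    #include <drm/drm_vblank.h>
    #include <linux/dma-fence.h>
    #include <linux/ktime.h>

    /* Hypothetical helper: hint @fence with the start of @crtc's next vblank. */
    static void hypothetical_hint_scanout_fence(struct drm_crtc *crtc,
                                                struct dma_fence *fence)
    {
            ktime_t deadline;

            /* If vblank timing is not known, fall back to an immediate hint. */
            if (drm_crtc_next_vblank_start(crtc, &deadline))
                    deadline = ktime_get();

            dma_fence_set_deadline(fence, deadline);
    }

This mirrors, for a single fence, what set_fence_deadline() in drm_atomic_helper.c does for all plane fences in an atomic commit.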
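For hinting everything attached to a buffer's reservation object, here is a sketch under the assumption of a hypothetical readback path that must block until all writers have finished. dma_resv_wait_timeout(), DMA_RESV_USAGE_WRITE and msecs_to_jiffies() are existing interfaces; dma_resv_set_deadline() is the helper added by this patch, and the 100 ms timeout is an arbitrary illustration value.

    #include <linux/dma-resv.h>
    #include <linux/jiffies.h>
    #include <linux/ktime.h>

    /* Hypothetical example: hint, then block on, a buffer's write fences. */
    static long hypothetical_wait_for_writers(struct dma_resv *resv)
    {
            /* Immediate deadline: no forward progress until the fences signal. */
            dma_resv_set_deadline(resv, DMA_RESV_USAGE_WRITE, ktime_get());

            return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_WRITE, true,
                                         msecs_to_jiffies(100));
    }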
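On the signaler side, a driver fence can implement &dma_fence_ops.set_deadline along the same lines as drm_sched_fence_set_deadline_finished() above: remember only the earliest deadline seen so far and kick whatever clock-boost mechanism the driver has. A rough sketch follows, where struct my_fence, its deadline bookkeeping and my_fence_boost() are hypothetical driver state rather than anything defined by this patch.

    #include <linux/dma-fence.h>
    #include <linux/ktime.h>
    #include <linux/spinlock.h>

    struct my_fence {
            struct dma_fence base;
            spinlock_t lock;
            ktime_t deadline;
            bool has_deadline;
    };

    static void my_fence_boost(struct my_fence *mf); /* hypothetical devfreq-boost hook */

    static void my_fence_set_deadline(struct dma_fence *f, ktime_t deadline)
    {
            struct my_fence *mf = container_of(f, struct my_fence, base);
            unsigned long flags;

            spin_lock_irqsave(&mf->lock, flags);
            /* Multiple hints may arrive; only the soonest one matters. */
            if (!mf->has_deadline || ktime_before(deadline, mf->deadline)) {
                    mf->deadline = deadline;
                    mf->has_deadline = true;
            }
            spin_unlock_irqrestore(&mf->lock, flags);

            my_fence_boost(mf);
    }

The callback would then be wired into the driver's &dma_fence_ops as .set_deadline, in the same way the patch wires up dma_fence_array_ops, dma_fence_chain_ops and drm_sched_fence_ops_finished.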