2009-08-25 10:15:50 +00:00
|
|
|
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
|
|
|
|
#define _I915_TRACE_H_
|
|
|
|
|
|
|
|
#include <linux/stringify.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/tracepoint.h>
|
|
|
|
|
|
|
|
#include <drm/drmP.h>
|
2010-11-08 19:18:58 +00:00
|
|
|
#include "i915_drv.h"
|
2014-04-29 10:35:48 +00:00
|
|
|
#include "intel_drv.h"
|
2011-02-03 11:57:46 +00:00
|
|
|
#include "intel_ringbuffer.h"
|
2009-08-25 10:15:50 +00:00
|
|
|
|
|
|
|
#undef TRACE_SYSTEM
|
|
|
|
#define TRACE_SYSTEM i915
|
|
|
|
#define TRACE_INCLUDE_FILE i915_trace
|
|
|
|
|
2014-04-29 10:35:48 +00:00
|
|
|
/* pipe updates */
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_pipe_update_start,
|
2015-09-17 15:08:32 +00:00
|
|
|
TP_PROTO(struct intel_crtc *crtc),
|
|
|
|
TP_ARGS(crtc),
|
2014-04-29 10:35:48 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(enum pipe, pipe)
|
|
|
|
__field(u32, frame)
|
|
|
|
__field(u32, scanline)
|
|
|
|
__field(u32, min)
|
|
|
|
__field(u32, max)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->pipe = crtc->pipe;
|
|
|
|
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
|
|
|
|
crtc->pipe);
|
|
|
|
__entry->scanline = intel_get_crtc_scanline(crtc);
|
2015-09-17 15:08:32 +00:00
|
|
|
__entry->min = crtc->debug.min_vbl;
|
|
|
|
__entry->max = crtc->debug.max_vbl;
|
2014-04-29 10:35:48 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
|
|
|
|
pipe_name(__entry->pipe), __entry->frame,
|
|
|
|
__entry->scanline, __entry->min, __entry->max)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_pipe_update_vblank_evaded,
|
2015-09-17 15:08:32 +00:00
|
|
|
TP_PROTO(struct intel_crtc *crtc),
|
|
|
|
TP_ARGS(crtc),
|
2014-04-29 10:35:48 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(enum pipe, pipe)
|
|
|
|
__field(u32, frame)
|
|
|
|
__field(u32, scanline)
|
|
|
|
__field(u32, min)
|
|
|
|
__field(u32, max)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->pipe = crtc->pipe;
|
2015-09-17 15:08:32 +00:00
|
|
|
__entry->frame = crtc->debug.start_vbl_count;
|
|
|
|
__entry->scanline = crtc->debug.scanline_start;
|
|
|
|
__entry->min = crtc->debug.min_vbl;
|
|
|
|
__entry->max = crtc->debug.max_vbl;
|
2014-04-29 10:35:48 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
|
|
|
|
pipe_name(__entry->pipe), __entry->frame,
|
|
|
|
__entry->scanline, __entry->min, __entry->max)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_pipe_update_end,
|
2015-09-17 15:08:32 +00:00
|
|
|
TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
|
|
|
|
TP_ARGS(crtc, frame, scanline_end),
|
2014-04-29 10:35:48 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(enum pipe, pipe)
|
|
|
|
__field(u32, frame)
|
|
|
|
__field(u32, scanline)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->pipe = crtc->pipe;
|
|
|
|
__entry->frame = frame;
|
2015-09-17 15:08:32 +00:00
|
|
|
__entry->scanline = scanline_end;
|
2014-04-29 10:35:48 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("pipe %c, frame=%u, scanline=%u",
|
|
|
|
pipe_name(__entry->pipe), __entry->frame,
|
|
|
|
__entry->scanline)
|
|
|
|
);
|
|
|
|
|
2009-08-25 10:15:50 +00:00
|
|
|
/* object tracking */
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_gem_object_create,
|
2010-11-08 19:18:58 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj),
|
2009-08-25 10:15:50 +00:00
|
|
|
TP_ARGS(obj),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2010-11-08 19:18:58 +00:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2009-08-25 10:15:50 +00:00
|
|
|
__field(u32, size)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
2010-11-08 19:18:58 +00:00
|
|
|
__entry->size = obj->base.size;
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
|
|
|
|
);
|
|
|
|
|
2015-10-01 11:18:26 +00:00
|
|
|
TRACE_EVENT(i915_gem_shrink,
|
|
|
|
TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
|
|
|
|
TP_ARGS(i915, target, flags),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(int, dev)
|
|
|
|
__field(unsigned long, target)
|
|
|
|
__field(unsigned, flags)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = i915->drm.primary->index;
|
2015-10-01 11:18:26 +00:00
|
|
|
__entry->target = target;
|
|
|
|
__entry->flags = flags;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%d, target=%lu, flags=%x",
|
|
|
|
__entry->dev, __entry->target, __entry->flags)
|
|
|
|
);
|
|
|
|
|
TRACE_EVENT(i915_vma_bind,
|
2014-02-14 13:01:11 +00:00
|
|
|
TP_PROTO(struct i915_vma *vma, unsigned flags),
|
|
|
|
TP_ARGS(vma, flags),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2010-11-08 19:18:58 +00:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 00:00:10 +00:00
|
|
|
__field(struct i915_address_space *, vm)
|
2015-01-22 17:01:23 +00:00
|
|
|
__field(u64, offset)
|
2011-02-03 11:57:46 +00:00
|
|
|
__field(u32, size)
|
2014-02-14 13:01:11 +00:00
|
|
|
__field(unsigned, flags)
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 00:00:10 +00:00
|
|
|
__entry->obj = vma->obj;
|
|
|
|
__entry->vm = vma->vm;
|
|
|
|
__entry->offset = vma->node.start;
|
|
|
|
__entry->size = vma->node.size;
|
2014-02-14 13:01:11 +00:00
|
|
|
__entry->flags = flags;
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
2015-01-22 17:01:23 +00:00
|
|
|
TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
|
2011-02-03 11:57:46 +00:00
|
|
|
__entry->obj, __entry->offset, __entry->size,
|
2014-02-14 13:01:11 +00:00
|
|
|
__entry->flags & PIN_MAPPABLE ? ", mappable" : "",
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 00:00:10 +00:00
|
|
|
__entry->vm)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
TRACE_EVENT(i915_vma_unbind,
|
|
|
|
TP_PROTO(struct i915_vma *vma),
|
|
|
|
TP_ARGS(vma),
|
2011-02-03 11:57:46 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 00:00:10 +00:00
|
|
|
__field(struct i915_address_space *, vm)
|
2015-01-22 17:01:23 +00:00
|
|
|
__field(u64, offset)
|
2011-02-03 11:57:46 +00:00
|
|
|
__field(u32, size)
|
|
|
|
),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_fast_assign(
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 00:00:10 +00:00
|
|
|
__entry->obj = vma->obj;
|
|
|
|
__entry->vm = vma->vm;
|
|
|
|
__entry->offset = vma->node.start;
|
|
|
|
__entry->size = vma->node.size;
|
2011-02-03 11:57:46 +00:00
|
|
|
),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2015-01-22 17:01:23 +00:00
|
|
|
TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
|
drm/i915: plumb VM into bind/unbind code
As alluded to in several patches, and it will be reiterated later... A
VMA is an abstraction for a GEM BO bound into an address space.
Therefore it stands to reason, that the existing bind, and unbind are
the ones which will be the most impacted. This patch implements this,
and updates all callers which weren't already updated in the series
(because it was too messy).
This patch represents the bulk of an earlier, larger patch. I've pulled
out a bunch of things by the request of Daniel. The history is preserved
for posterity with the email convention of ">" One big change from the
original patch aside from a bunch of cropping is I've created an
i915_vma_unbind() function. That is because we always have the VMA
anyway, and doing an extra lookup is useful. There is a caveat, we
retain an i915_gem_object_ggtt_unbind, for the global cases which might
not talk in VMAs.
> drm/i915: plumb VM into object operations
>
> This patch was formerly known as:
> "drm/i915: Create VMAs (part 3) - plumbing"
>
> This patch adds a VM argument, bind/unbind, and the object
> offset/size/color getters/setters. It preserves the old ggtt helper
> functions because things still need, and will continue to need them.
>
> Some code will still need to be ported over after this.
>
> v2: Fix purge to pick an object and unbind all vmas
> This was doable because of the global bound list change.
>
> v3: With the commit to actually pin/unpin pages in place, there is no
> longer a need to check if unbind succeeded before calling put_pages().
> Make put_pages only BUG() after checking pin count.
>
> v4: Rebased on top of the new hangcheck work by Mika
> plumbed eb_destroy also
> Many checkpatch related fixes
>
> v5: Very large rebase
>
> v6:
> Change BUG_ON to WARN_ON (Daniel)
> Rename vm to ggtt in preallocate stolen, since it is always ggtt when
> dealing with stolen memory. (Daniel)
> list_for_each will short-circuit already (Daniel)
> remove superflous space (Daniel)
> Use per object list of vmas (Daniel)
> Make obj_bound_any() use obj_bound for each vm (Ben)
> s/bind_to_gtt/bind_to_vm/ (Ben)
>
> Fixed up the inactive shrinker. As Daniel noticed the code could
> potentially count the same object multiple times. While it's not
> possible in the current case, since 1 object can only ever be bound into
> 1 address space thus far - we may as well try to get something more
> future proof in place now. With a prep patch before this to switch over
> to using the bound list + inactive check, we're now able to carry that
> forward for every address space an object is bound into.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Rebase on top of the loss of "drm/i915: Cleanup more of VMA
in destroy".]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2013-08-01 00:00:10 +00:00
|
|
|
__entry->obj, __entry->offset, __entry->size, __entry->vm)
|
2011-02-03 11:57:46 +00:00
|
|
|
);
|
|
|
|
|
2016-02-26 11:03:20 +00:00
|
|
|
TRACE_EVENT(i915_va_alloc,
|
|
|
|
TP_PROTO(struct i915_vma *vma),
|
|
|
|
TP_ARGS(vma),
|
2015-03-24 15:46:23 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct i915_address_space *, vm)
|
|
|
|
__field(u64, start)
|
|
|
|
__field(u64, end)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-02-26 11:03:20 +00:00
|
|
|
__entry->vm = vma->vm;
|
|
|
|
__entry->start = vma->node.start;
|
|
|
|
__entry->end = vma->node.start + vma->node.size - 1;
|
2015-03-24 15:46:23 +00:00
|
|
|
),
|
|
|
|
|
2016-02-26 11:03:20 +00:00
|
|
|
TP_printk("vm=%p (%c), 0x%llx-0x%llx",
|
|
|
|
__entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P', __entry->start, __entry->end)
|
2015-03-24 15:46:23 +00:00
|
|
|
);
|
|
|
|
|
2015-07-29 16:23:49 +00:00
|
|
|
DECLARE_EVENT_CLASS(i915_px_entry,
|
|
|
|
TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
|
|
|
|
TP_ARGS(vm, px, start, px_shift),
|
2015-03-24 15:46:23 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct i915_address_space *, vm)
|
2015-07-29 16:23:49 +00:00
|
|
|
__field(u32, px)
|
2015-03-24 15:46:23 +00:00
|
|
|
__field(u64, start)
|
|
|
|
__field(u64, end)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vm = vm;
|
2015-07-29 16:23:49 +00:00
|
|
|
__entry->px = px;
|
2015-03-24 15:46:23 +00:00
|
|
|
__entry->start = start;
|
2015-07-29 16:23:49 +00:00
|
|
|
__entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
|
2015-03-24 15:46:23 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
|
2015-07-29 16:23:49 +00:00
|
|
|
__entry->vm, __entry->px, __entry->start, __entry->end)
|
2015-03-24 15:46:23 +00:00
|
|
|
);
|
|
|
|
|
2015-07-29 16:23:49 +00:00
|
|
|
DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
|
2015-03-24 15:46:23 +00:00
|
|
|
TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
|
|
|
|
TP_ARGS(vm, pde, start, pde_shift)
|
|
|
|
);
|
|
|
|
|
2015-07-29 16:23:49 +00:00
|
|
|
DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
|
|
|
|
TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
|
|
|
|
TP_ARGS(vm, pdpe, start, pdpe_shift),
|
|
|
|
|
|
|
|
TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
|
|
|
|
__entry->vm, __entry->px, __entry->start, __entry->end)
|
|
|
|
);
|
|
|
|
|
2015-07-30 10:05:29 +00:00
|
|
|
DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
|
|
|
|
TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
|
|
|
|
TP_ARGS(vm, pml4e, start, pml4e_shift),
|
|
|
|
|
|
|
|
TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
|
|
|
|
__entry->vm, __entry->px, __entry->start, __entry->end)
|
|
|
|
);
|
|
|
|
|
2015-03-24 15:46:23 +00:00
|
|
|
/* Avoid extra math because we only support two sizes. The format is defined by
 * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
#define TRACE_PT_SIZE(bits) \
	((((bits) == 1024) ? 288 : 144) + 1)
|
|
|
|
|
|
|
|
DECLARE_EVENT_CLASS(i915_page_table_entry_update,
|
|
|
|
TP_PROTO(struct i915_address_space *vm, u32 pde,
|
2015-04-08 11:13:23 +00:00
|
|
|
struct i915_page_table *pt, u32 first, u32 count, u32 bits),
|
2015-03-24 15:46:23 +00:00
|
|
|
TP_ARGS(vm, pde, pt, first, count, bits),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct i915_address_space *, vm)
|
|
|
|
__field(u32, pde)
|
|
|
|
__field(u32, first)
|
|
|
|
__field(u32, last)
|
|
|
|
__dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vm = vm;
|
|
|
|
__entry->pde = pde;
|
|
|
|
__entry->first = first;
|
|
|
|
__entry->last = first + count - 1;
|
|
|
|
scnprintf(__get_str(cur_ptes),
|
|
|
|
TRACE_PT_SIZE(bits),
|
|
|
|
"%*pb",
|
|
|
|
bits,
|
|
|
|
pt->used_ptes);
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
|
|
|
|
__entry->vm, __entry->pde, __entry->last, __entry->first,
|
|
|
|
__get_str(cur_ptes))
|
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
|
|
|
|
TP_PROTO(struct i915_address_space *vm, u32 pde,
|
2015-04-08 11:13:23 +00:00
|
|
|
struct i915_page_table *pt, u32 first, u32 count, u32 bits),
|
2015-03-24 15:46:23 +00:00
|
|
|
TP_ARGS(vm, pde, pt, first, count, bits)
|
|
|
|
);
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TRACE_EVENT(i915_gem_object_change_domain,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
|
|
|
|
TP_ARGS(obj, old_read, old_write),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2010-11-08 19:18:58 +00:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2009-08-25 10:15:50 +00:00
|
|
|
__field(u32, read_domains)
|
|
|
|
__field(u32, write_domain)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
2011-02-03 11:57:46 +00:00
|
|
|
__entry->read_domains = obj->base.read_domains | (old_read << 16);
|
|
|
|
__entry->write_domain = obj->base.write_domain | (old_write << 16);
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
|
2009-08-25 10:15:50 +00:00
|
|
|
__entry->obj,
|
2011-02-03 11:57:46 +00:00
|
|
|
__entry->read_domains >> 16,
|
|
|
|
__entry->read_domains & 0xffff,
|
|
|
|
__entry->write_domain >> 16,
|
|
|
|
__entry->write_domain & 0xffff)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TRACE_EVENT(i915_gem_object_pwrite,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
|
|
|
|
TP_ARGS(obj, offset, len),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
|
|
|
__field(u32, offset)
|
|
|
|
__field(u32, len)
|
|
|
|
),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
|
|
|
__entry->offset = offset;
|
|
|
|
__entry->len = len;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("obj=%p, offset=%u, len=%u",
|
|
|
|
__entry->obj, __entry->offset, __entry->len)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_gem_object_pread,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
|
|
|
|
TP_ARGS(obj, offset, len),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2010-11-08 19:18:58 +00:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2011-02-03 11:57:46 +00:00
|
|
|
__field(u32, offset)
|
|
|
|
__field(u32, len)
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
2011-02-03 11:57:46 +00:00
|
|
|
__entry->offset = offset;
|
|
|
|
__entry->len = len;
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_printk("obj=%p, offset=%u, len=%u",
|
|
|
|
__entry->obj, __entry->offset, __entry->len)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TRACE_EVENT(i915_gem_object_fault,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
|
|
|
|
TP_ARGS(obj, index, gtt, write),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
|
|
|
__field(u32, index)
|
|
|
|
__field(bool, gtt)
|
|
|
|
__field(bool, write)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
|
|
|
__entry->index = index;
|
|
|
|
__entry->gtt = gtt;
|
|
|
|
__entry->write = write;
|
|
|
|
),
|
2010-05-24 08:25:44 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_printk("obj=%p, %s index=%u %s",
|
|
|
|
__entry->obj,
|
|
|
|
__entry->gtt ? "GTT" : "CPU",
|
|
|
|
__entry->index,
|
|
|
|
__entry->write ? ", writable" : "")
|
|
|
|
);
|
|
|
|
|
|
|
|
DECLARE_EVENT_CLASS(i915_gem_object,
|
2010-11-08 19:18:58 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj),
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_ARGS(obj),
|
2010-05-24 08:25:44 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->obj = obj;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("obj=%p", __entry->obj)
|
2010-05-24 08:25:44 +00:00
|
|
|
);
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
|
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj),
|
|
|
|
TP_ARGS(obj)
|
|
|
|
);
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
|
2010-11-08 19:18:58 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_object *obj),
|
2010-03-11 08:41:45 +00:00
|
|
|
TP_ARGS(obj)
|
|
|
|
);
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TRACE_EVENT(i915_gem_evict,
|
2016-08-04 15:32:18 +00:00
|
|
|
TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
|
|
|
|
TP_ARGS(vm, size, align, flags),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
2016-08-04 15:32:18 +00:00
|
|
|
__field(struct i915_address_space *, vm)
|
2011-02-03 11:57:46 +00:00
|
|
|
__field(u32, size)
|
|
|
|
__field(u32, align)
|
2016-08-04 15:32:18 +00:00
|
|
|
__field(unsigned int, flags)
|
2011-02-03 11:57:46 +00:00
|
|
|
),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_fast_assign(
|
2016-11-29 12:42:05 +00:00
|
|
|
__entry->dev = vm->i915->drm.primary->index;
|
2016-08-04 15:32:18 +00:00
|
|
|
__entry->vm = vm;
|
2011-02-03 11:57:46 +00:00
|
|
|
__entry->size = size;
|
|
|
|
__entry->align = align;
|
2014-02-14 13:01:11 +00:00
|
|
|
__entry->flags = flags;
|
2011-02-03 11:57:46 +00:00
|
|
|
),
|
|
|
|
|
2016-08-04 15:32:18 +00:00
|
|
|
TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
|
|
|
|
__entry->dev, __entry->vm, __entry->size, __entry->align,
|
2014-02-14 13:01:11 +00:00
|
|
|
__entry->flags & PIN_MAPPABLE ? ", mappable" : "")
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TRACE_EVENT(i915_gem_evict_everything,
|
drm/i915: Track unbound pages
When dealing with a working set larger than the GATT, or even the
mappable aperture when touching through the GTT, we end up with evicting
objects only to rebind them at a new offset again later. Moving an
object into and out of the GTT requires clflushing the pages, thus
causing a double-clflush penalty for rebinding.
To avoid having to clflush on rebinding, we can track the pages as they
are evicted from the GTT and only relinquish those pages on memory
pressure.
As usual, if it were not for the handling of out-of-memory condition and
having to manually shrink our own bo caches, it would be a net reduction
of code. Alas.
Note: The patch also contains a few changes to the last-hope
evict_everything logic in i915_gem_execbuffer.c - we no longer try to
only evict the purgeable stuff in a first try (since that's superfluous
and only helps in OOM corner-cases, not fragmented-gtt thrashing
situations).
Also, the extraction of the get_pages retry loop from bind_to_gtt (and
other callsites) to get_pages should imo have been a separate patch.
v2: Ditch the newly added put_pages (for unbound objects only) in
i915_gem_reset. A quick irc discussion hasn't revealed any important
reason for this, so if we need this, I'd like to have a git blame'able
explanation for it.
v3: Undo the s/drm_malloc_ab/kmalloc/ in get_pages that Chris noticed.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Split out code movements and rant a bit in the commit message
with a few Notes. Done v2]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2012-08-20 09:40:46 +00:00
|
|
|
TP_PROTO(struct drm_device *dev),
|
|
|
|
TP_ARGS(dev),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->dev = dev->primary->index;
|
|
|
|
),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
drm/i915: Track unbound pages
When dealing with a working set larger than the GATT, or even the
mappable aperture when touching through the GTT, we end up with evicting
objects only to rebind them at a new offset again later. Moving an
object into and out of the GTT requires clflushing the pages, thus
causing a double-clflush penalty for rebinding.
To avoid having to clflush on rebinding, we can track the pages as they
are evicted from the GTT and only relinquish those pages on memory
pressure.
As usual, if it were not for the handling of out-of-memory condition and
having to manually shrink our own bo caches, it would be a net reduction
of code. Alas.
Note: The patch also contains a few changes to the last-hope
evict_everything logic in i915_gem_execbuffer.c - we no longer try to
only evict the purgeable stuff in a first try (since that's superfluous
and only helps in OOM corner-cases, not fragmented-gtt thrashing
situations).
Also, the extraction of the get_pages retry loop from bind_to_gtt (and
other callsites) to get_pages should imo have been a separate patch.
v2: Ditch the newly added put_pages (for unbound objects only) in
i915_gem_reset. A quick irc discussion hasn't revealed any important
reason for this, so if we need this, I'd like to have a git blame'able
explanation for it.
v3: Undo the s/drm_malloc_ab/kmalloc/ in get_pages that Chris noticed.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Split out code movements and rant a bit in the commit message
with a few Notes. Done v2]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
2012-08-20 09:40:46 +00:00
|
|
|
TP_printk("dev=%d", __entry->dev)
|
2011-02-03 11:57:46 +00:00
|
|
|
);
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2013-09-24 16:57:56 +00:00
|
|
|
TRACE_EVENT(i915_gem_evict_vm,
|
|
|
|
TP_PROTO(struct i915_address_space *vm),
|
|
|
|
TP_ARGS(vm),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2014-03-18 15:27:37 +00:00
|
|
|
__field(u32, dev)
|
2013-09-24 16:57:56 +00:00
|
|
|
__field(struct i915_address_space *, vm)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-11-29 12:42:05 +00:00
|
|
|
__entry->dev = vm->i915->drm.primary->index;
|
2013-09-24 16:57:56 +00:00
|
|
|
__entry->vm = vm;
|
|
|
|
),
|
|
|
|
|
2014-03-18 15:27:37 +00:00
|
|
|
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
|
2013-09-24 16:57:56 +00:00
|
|
|
);
|
|
|
|
|
2017-01-11 11:23:11 +00:00
|
|
|
TRACE_EVENT(i915_gem_evict_node,
|
|
|
|
TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
|
|
|
|
TP_ARGS(vm, node, flags),
|
2016-12-05 14:29:37 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
__field(struct i915_address_space *, vm)
|
|
|
|
__field(u64, start)
|
|
|
|
__field(u64, size)
|
|
|
|
__field(unsigned long, color)
|
|
|
|
__field(unsigned int, flags)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2017-01-11 11:23:11 +00:00
|
|
|
__entry->dev = vm->i915->drm.primary->index;
|
|
|
|
__entry->vm = vm;
|
|
|
|
__entry->start = node->start;
|
|
|
|
__entry->size = node->size;
|
|
|
|
__entry->color = node->color;
|
2016-12-05 14:29:37 +00:00
|
|
|
__entry->flags = flags;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x",
|
|
|
|
__entry->dev, __entry->vm,
|
|
|
|
__entry->start, __entry->size,
|
|
|
|
__entry->color, __entry->flags)
|
|
|
|
);
|
|
|
|
|
2013-09-25 10:43:28 +00:00
|
|
|
TRACE_EVENT(i915_gem_ring_sync_to,
|
2016-08-02 21:50:26 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_request *to,
|
|
|
|
struct drm_i915_gem_request *from),
|
|
|
|
TP_ARGS(to, from),
|
2013-09-25 10:43:28 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
__field(u32, sync_from)
|
|
|
|
__field(u32, sync_to)
|
|
|
|
__field(u32, seqno)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = from->i915->drm.primary->index;
|
2016-08-02 21:50:26 +00:00
|
|
|
__entry->sync_from = from->engine->id;
|
|
|
|
__entry->sync_to = to->engine->id;
|
2016-10-28 12:58:49 +00:00
|
|
|
__entry->seqno = from->global_seqno;
|
2013-09-25 10:43:28 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
|
|
|
|
__entry->dev,
|
|
|
|
__entry->sync_from, __entry->sync_to,
|
|
|
|
__entry->seqno)
|
|
|
|
);
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TRACE_EVENT(i915_gem_ring_dispatch,
|
2014-11-24 18:49:38 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
|
|
|
|
TP_ARGS(req, flags),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2009-09-23 23:23:33 +00:00
|
|
|
__field(u32, dev)
|
2011-02-03 11:57:46 +00:00
|
|
|
__field(u32, ring)
|
2009-08-25 10:15:50 +00:00
|
|
|
__field(u32, seqno)
|
2012-10-17 11:09:54 +00:00
|
|
|
__field(u32, flags)
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = req->i915->drm.primary->index;
|
2016-05-06 14:40:21 +00:00
|
|
|
__entry->ring = req->engine->id;
|
2016-10-28 12:58:49 +00:00
|
|
|
__entry->seqno = req->global_seqno;
|
2012-10-17 11:09:54 +00:00
|
|
|
__entry->flags = flags;
|
2016-10-25 12:00:45 +00:00
|
|
|
dma_fence_enable_sw_signaling(&req->fence);
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
2012-10-17 11:09:54 +00:00
|
|
|
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
|
|
|
|
__entry->dev, __entry->ring, __entry->seqno, __entry->flags)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TRACE_EVENT(i915_gem_ring_flush,
|
2015-05-29 16:43:57 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
|
|
|
|
TP_ARGS(req, invalidate, flush),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2009-09-23 23:23:33 +00:00
|
|
|
__field(u32, dev)
|
2011-02-03 11:57:46 +00:00
|
|
|
__field(u32, ring)
|
|
|
|
__field(u32, invalidate)
|
|
|
|
__field(u32, flush)
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = req->i915->drm.primary->index;
|
2016-03-16 11:00:38 +00:00
|
|
|
__entry->ring = req->engine->id;
|
2011-02-03 11:57:46 +00:00
|
|
|
__entry->invalidate = invalidate;
|
|
|
|
__entry->flush = flush;
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
|
|
|
|
__entry->dev, __entry->ring,
|
|
|
|
__entry->invalidate, __entry->flush)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2010-03-11 08:41:45 +00:00
|
|
|
DECLARE_EVENT_CLASS(i915_gem_request,
|
2014-11-24 18:49:38 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_request *req),
|
|
|
|
TP_ARGS(req),
|
2009-08-25 10:15:50 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2009-09-23 23:23:33 +00:00
|
|
|
__field(u32, dev)
|
2011-02-03 11:57:46 +00:00
|
|
|
__field(u32, ring)
|
2009-08-25 10:15:50 +00:00
|
|
|
__field(u32, seqno)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = req->i915->drm.primary->index;
|
2016-05-06 14:40:21 +00:00
|
|
|
__entry->ring = req->engine->id;
|
2016-10-28 12:58:49 +00:00
|
|
|
__entry->seqno = req->global_seqno;
|
2009-08-25 10:15:50 +00:00
|
|
|
),
|
|
|
|
|
2015-04-07 15:21:09 +00:00
|
|
|
TP_printk("dev=%u, ring=%u, seqno=%u",
|
|
|
|
__entry->dev, __entry->ring, __entry->seqno)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2011-02-03 11:57:46 +00:00
|
|
|
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
|
2014-11-24 18:49:38 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_request *req),
|
|
|
|
TP_ARGS(req)
|
2011-02-03 11:57:46 +00:00
|
|
|
);
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2014-12-05 13:49:36 +00:00
|
|
|
TRACE_EVENT(i915_gem_request_notify,
|
2016-03-16 11:00:38 +00:00
|
|
|
TP_PROTO(struct intel_engine_cs *engine),
|
|
|
|
TP_ARGS(engine),
|
2013-09-23 20:33:19 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
__field(u32, ring)
|
|
|
|
__field(u32, seqno)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = engine->i915->drm.primary->index;
|
2016-03-16 11:00:38 +00:00
|
|
|
__entry->ring = engine->id;
|
2016-07-01 16:23:17 +00:00
|
|
|
__entry->seqno = intel_engine_get_seqno(engine);
|
2013-09-23 20:33:19 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%u, ring=%u, seqno=%u",
|
|
|
|
__entry->dev, __entry->ring, __entry->seqno)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2010-03-11 08:41:45 +00:00
|
|
|
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
|
2014-11-24 18:49:38 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_request *req),
|
|
|
|
TP_ARGS(req)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2014-12-05 13:49:36 +00:00
|
|
|
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
|
|
|
|
TP_PROTO(struct drm_i915_gem_request *req),
|
|
|
|
TP_ARGS(req)
|
|
|
|
);
|
|
|
|
|
2012-05-24 22:03:09 +00:00
|
|
|
TRACE_EVENT(i915_gem_request_wait_begin,
|
2014-11-24 18:49:38 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_request *req),
|
|
|
|
TP_ARGS(req),
|
2012-05-24 22:03:09 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
|
|
|
__field(u32, ring)
|
|
|
|
__field(u32, seqno)
|
|
|
|
__field(bool, blocking)
|
|
|
|
),
|
|
|
|
|
|
|
|
/* NB: the blocking information is racy since mutex_is_locked
|
|
|
|
* doesn't check that the current thread holds the lock. The only
|
|
|
|
* other option would be to pass the boolean information of whether
|
|
|
|
* or not the class was blocking down through the stack which is
|
|
|
|
* less desirable.
|
|
|
|
*/
|
|
|
|
TP_fast_assign(
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = req->i915->drm.primary->index;
|
2016-05-06 14:40:21 +00:00
|
|
|
__entry->ring = req->engine->id;
|
2016-10-28 12:58:49 +00:00
|
|
|
__entry->seqno = req->global_seqno;
|
2014-11-24 18:49:38 +00:00
|
|
|
__entry->blocking =
|
2016-07-05 09:40:23 +00:00
|
|
|
mutex_is_locked(&req->i915->drm.struct_mutex);
|
2012-05-24 22:03:09 +00:00
|
|
|
),
|
|
|
|
|
2015-04-07 15:21:09 +00:00
|
|
|
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
|
|
|
|
__entry->dev, __entry->ring,
|
2014-12-05 13:49:36 +00:00
|
|
|
__entry->seqno, __entry->blocking ? "yes (NB)" : "no")
|
2010-03-11 08:41:45 +00:00
|
|
|
);
|
2009-08-25 10:15:50 +00:00
|
|
|
|
2010-03-11 08:41:45 +00:00
|
|
|
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
|
2014-11-24 18:49:38 +00:00
|
|
|
TP_PROTO(struct drm_i915_gem_request *req),
|
|
|
|
TP_ARGS(req)
|
2009-08-25 10:15:50 +00:00
|
|
|
);
|
|
|
|
|
2010-07-01 23:48:37 +00:00
|
|
|
TRACE_EVENT(i915_flip_request,
|
2010-11-08 19:18:58 +00:00
|
|
|
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
|
2010-07-01 23:48:37 +00:00
|
|
|
|
|
|
|
TP_ARGS(plane, obj),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(int, plane)
|
2010-11-08 19:18:58 +00:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2010-07-01 23:48:37 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->plane = plane;
|
|
|
|
__entry->obj = obj;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(i915_flip_complete,
|
2010-11-08 19:18:58 +00:00
|
|
|
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
|
2010-07-01 23:48:37 +00:00
|
|
|
|
|
|
|
TP_ARGS(plane, obj),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(int, plane)
|
2010-11-08 19:18:58 +00:00
|
|
|
__field(struct drm_i915_gem_object *, obj)
|
2010-07-01 23:48:37 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->plane = plane;
|
|
|
|
__entry->obj = obj;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
|
|
|
|
);
|
|
|
|
|
2013-07-19 19:36:56 +00:00
|
|
|
TRACE_EVENT_CONDITION(i915_reg_rw,
|
drm/i915: Type safe register read/write
Make I915_READ and I915_WRITE more type safe by wrapping the register
offset in a struct. This should eliminate most of the fumbles we've had
with misplaced parens.
This only takes care of normal mmio registers. We could extend the idea
to other register types and define each with its own struct. That way
you wouldn't be able to accidentally pass the wrong thing to a specific
register access function.
The gpio_reg setup is probably the ugliest thing left. But I figure I'd
just leave it for now, and wait for some divine inspiration to strike
before making it nice.
As for the generated code, it's actually a bit better sometimes. Eg.
looking at i915_irq_handler(), we can see the following change:
lea 0x70024(%rdx,%rax,1),%r9d
mov $0x1,%edx
- movslq %r9d,%r9
- mov %r9,%rsi
- mov %r9,-0x58(%rbp)
- callq *0xd8(%rbx)
+ mov %r9d,%esi
+ mov %r9d,-0x48(%rbp)
callq *0xd8(%rbx)
So previously gcc thought the register offset might be signed and
decided to sign extend it, just in case. The rest appears to be
mostly just minor shuffling of instructions.
v2: i915_mmio_reg_{offset,equal,valid}() helpers added
s/_REG/_MMIO/ in the register defines
no more switch statements left to worry about
ring_emit stuff got sorted in a prep patch
cmd parser, lrc context and w/a batch buildup also in prep patch
vgpu stuff cleaned up and moved to a prep patch
all other unrelated changes split out
v3: Rebased due to BXT DSI/BLC, MOCS, etc.
v4: Rebased due to churn, s/i915_mmio_reg_t/i915_reg_t/
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1447853606-2751-1-git-send-email-ville.syrjala@linux.intel.com
2015-11-18 13:33:26 +00:00
|
|
|
TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
|
2011-08-16 19:34:10 +00:00
|
|
|
|
2013-07-19 19:36:56 +00:00
|
|
|
TP_ARGS(write, reg, val, len, trace),
|
|
|
|
|
|
|
|
TP_CONDITION(trace),
|
2011-08-16 19:34:10 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u64, val)
|
|
|
|
__field(u32, reg)
|
|
|
|
__field(u16, write)
|
|
|
|
__field(u16, len)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->val = (u64)val;
|
drm/i915: Type safe register read/write
Make I915_READ and I915_WRITE more type safe by wrapping the register
offset in a struct. This should eliminate most of the fumbles we've had
with misplaced parens.
This only takes care of normal mmio registers. We could extend the idea
to other register types and define each with its own struct. That way
you wouldn't be able to accidentally pass the wrong thing to a specific
register access function.
The gpio_reg setup is probably the ugliest thing left. But I figure I'd
just leave it for now, and wait for some divine inspiration to strike
before making it nice.
As for the generated code, it's actually a bit better sometimes. Eg.
looking at i915_irq_handler(), we can see the following change:
lea 0x70024(%rdx,%rax,1),%r9d
mov $0x1,%edx
- movslq %r9d,%r9
- mov %r9,%rsi
- mov %r9,-0x58(%rbp)
- callq *0xd8(%rbx)
+ mov %r9d,%esi
+ mov %r9d,-0x48(%rbp)
callq *0xd8(%rbx)
So previously gcc thought the register offset might be signed and
decided to sign extend it, just in case. The rest appears to be
mostly just minor shuffling of instructions.
v2: i915_mmio_reg_{offset,equal,valid}() helpers added
s/_REG/_MMIO/ in the register defines
no more switch statements left to worry about
ring_emit stuff got sorted in a prep patch
cmd parser, lrc context and w/a batch buildup also in prep patch
vgpu stuff cleaned up and moved to a prep patch
all other unrelated changes split out
v3: Rebased due to BXT DSI/BLC, MOCS, etc.
v4: Rebased due to churn, s/i915_mmio_reg_t/i915_reg_t/
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1447853606-2751-1-git-send-email-ville.syrjala@linux.intel.com
2015-11-18 13:33:26 +00:00
|
|
|
__entry->reg = i915_mmio_reg_offset(reg);
|
2011-08-16 19:34:10 +00:00
|
|
|
__entry->write = write;
|
|
|
|
__entry->len = len;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
|
|
|
|
__entry->write ? "write" : "read",
|
|
|
|
__entry->reg, __entry->len,
|
|
|
|
(u32)(__entry->val & 0xffffffff),
|
|
|
|
(u32)(__entry->val >> 32))
|
2010-11-08 09:09:41 +00:00
|
|
|
);
|
|
|
|
|
2012-08-30 11:26:48 +00:00
|
|
|
TRACE_EVENT(intel_gpu_freq_change,
|
|
|
|
TP_PROTO(u32 freq),
|
|
|
|
TP_ARGS(freq),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, freq)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->freq = freq;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("new_freq=%u", __entry->freq)
|
|
|
|
);
|
|
|
|
|
2014-11-10 13:44:31 +00:00
|
|
|
/**
|
|
|
|
* DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
|
|
|
|
*
|
|
|
|
* With full ppgtt enabled each process using drm will allocate at least one
|
|
|
|
* translation table. With these traces it is possible to keep track of the
|
|
|
|
* allocation and of the lifetime of the tables; this can be used during
|
|
|
|
* testing/debug to verify that we are not leaking ppgtts.
|
|
|
|
* These traces identify the ppgtt through the vm pointer, which is also printed
|
|
|
|
* by the i915_vma_bind and i915_vma_unbind tracepoints.
|
|
|
|
*/
|
|
|
|
DECLARE_EVENT_CLASS(i915_ppgtt,
|
|
|
|
TP_PROTO(struct i915_address_space *vm),
|
|
|
|
TP_ARGS(vm),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(struct i915_address_space *, vm)
|
|
|
|
__field(u32, dev)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vm = vm;
|
2016-11-29 12:42:05 +00:00
|
|
|
__entry->dev = vm->i915->drm.primary->index;
|
2014-11-10 13:44:31 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
|
|
|
|
)
|
|
|
|
|
|
|
|
DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
|
|
|
|
TP_PROTO(struct i915_address_space *vm),
|
|
|
|
TP_ARGS(vm)
|
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
|
|
|
|
TP_PROTO(struct i915_address_space *vm),
|
|
|
|
TP_ARGS(vm)
|
|
|
|
);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* DOC: i915_context_create and i915_context_free tracepoints
|
|
|
|
*
|
|
|
|
* These tracepoints are used to track creation and deletion of contexts.
|
|
|
|
* If full ppgtt is enabled, they also print the address of the vm assigned to
|
|
|
|
* the context.
|
|
|
|
*/
|
|
|
|
DECLARE_EVENT_CLASS(i915_context,
|
2016-05-24 13:53:34 +00:00
|
|
|
TP_PROTO(struct i915_gem_context *ctx),
|
2014-11-10 13:44:31 +00:00
|
|
|
TP_ARGS(ctx),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, dev)
|
2016-05-24 13:53:34 +00:00
|
|
|
__field(struct i915_gem_context *, ctx)
|
2014-11-10 13:44:31 +00:00
|
|
|
__field(struct i915_address_space *, vm)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->ctx = ctx;
|
|
|
|
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = ctx->i915->drm.primary->index;
|
2014-11-10 13:44:31 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
|
|
|
|
__entry->dev, __entry->ctx, __entry->vm)
|
|
|
|
)
|
|
|
|
|
|
|
|
DEFINE_EVENT(i915_context, i915_context_create,
|
2016-05-24 13:53:34 +00:00
|
|
|
TP_PROTO(struct i915_gem_context *ctx),
|
2014-11-10 13:44:31 +00:00
|
|
|
TP_ARGS(ctx)
|
|
|
|
);
|
|
|
|
|
|
|
|
DEFINE_EVENT(i915_context, i915_context_free,
|
2016-05-24 13:53:34 +00:00
|
|
|
TP_PROTO(struct i915_gem_context *ctx),
|
2014-11-10 13:44:31 +00:00
|
|
|
TP_ARGS(ctx)
|
|
|
|
);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* DOC: switch_mm tracepoint
|
|
|
|
*
|
|
|
|
* This tracepoint allows tracking of the mm switch, which is an important point
|
|
|
|
* in the lifetime of the vm in the legacy submission path. This tracepoint is
|
|
|
|
* called only if full ppgtt is enabled.
|
|
|
|
*/
|
|
|
|
TRACE_EVENT(switch_mm,
|
2016-05-24 13:53:34 +00:00
|
|
|
TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
|
2014-11-10 13:44:31 +00:00
|
|
|
|
2016-03-16 11:00:38 +00:00
|
|
|
TP_ARGS(engine, to),
|
2014-11-10 13:44:31 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(u32, ring)
|
2016-05-24 13:53:34 +00:00
|
|
|
__field(struct i915_gem_context *, to)
|
2014-11-10 13:44:31 +00:00
|
|
|
__field(struct i915_address_space *, vm)
|
|
|
|
__field(u32, dev)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2016-03-16 11:00:38 +00:00
|
|
|
__entry->ring = engine->id;
|
2014-11-10 13:44:31 +00:00
|
|
|
__entry->to = to;
|
|
|
|
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
|
2016-07-05 09:40:23 +00:00
|
|
|
__entry->dev = engine->i915->drm.primary->index;
|
2014-11-10 13:44:31 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
|
|
|
|
__entry->dev, __entry->ring, __entry->to, __entry->vm)
|
|
|
|
);
|
|
|
|
|
2009-08-25 10:15:50 +00:00
|
|
|
#endif /* _I915_TRACE_H_ */
|
|
|
|
|
|
|
|
/* This part must be outside protection */
|
|
|
|
#undef TRACE_INCLUDE_PATH
|
2010-05-03 12:24:41 +00:00
|
|
|
#define TRACE_INCLUDE_PATH .
|
2009-08-25 10:15:50 +00:00
|
|
|
#include <trace/define_trace.h>
|