drm/i915: Polish for_each_dbuf_slice()
Now that we have the dbuf slice mask stored in the device info, let's use it for for_each_dbuf_slice() and for_each_dbuf_slice_in_mask(). With this we can also rip out intel_dbuf_size() and intel_dbuf_num_slices().

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210416171011.19012-7-ville.syrjala@linux.intel.com
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
parent d152bb1f67
commit b88da66055
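For context, a minimal standalone sketch of the iteration shape this patch moves to: the platform's slice mask (now stored in device info) drives the loop, and the _in_mask variant layers a caller-supplied mask on top of it. All names and mask values below are stand-ins for illustration, not the real i915 definitions.

#include <stdio.h>

/* Stand-in definitions for illustration only (not the i915 ones). */
enum dbuf_slice { DBUF_S1, DBUF_S2, DBUF_S3, DBUF_S4, MAX_DBUF_SLICES };

#define BIT(n)            (1u << (n))
#define for_each_if(cond) if (!(cond)) {} else

/* Walk only the slices present in the platform's slice mask. */
#define for_each_slice(hw_mask, s) \
	for ((s) = DBUF_S1; (s) < MAX_DBUF_SLICES; (s)++) \
		for_each_if((hw_mask) & BIT(s))

/* Layered on top: additionally filter by a caller-supplied mask. */
#define for_each_slice_in_mask(hw_mask, s, m) \
	for_each_slice((hw_mask), (s)) \
		for_each_if((m) & BIT(s))

int main(void)
{
	unsigned int hw_mask = BIT(DBUF_S1) | BIT(DBUF_S2); /* hypothetical platform: S1 + S2 */
	unsigned int req = BIT(DBUF_S2) | BIT(DBUF_S3);     /* caller asks for S2 + S3 */
	enum dbuf_slice s;

	for_each_slice(hw_mask, s)
		printf("slice %d present\n", (int)s);                /* prints slices 0 and 1 */

	for_each_slice_in_mask(hw_mask, s, req)
		printf("slice %d present and requested\n", (int)s);  /* prints only slice 1 */

	return 0;
}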
@@ -390,7 +390,6 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
 	const struct intel_crtc_state *crtc_state;
 	struct intel_crtc *crtc;
 	int max_bw = 0;
-	int slice_id;
 	enum pipe pipe;
 	int i;
 
@@ -418,6 +417,7 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
 				&crtc_state->wm.skl.plane_ddb_uv[plane_id];
 			unsigned int data_rate = crtc_state->data_rate[plane_id];
 			unsigned int dbuf_mask = 0;
+			enum dbuf_slice slice;
 
 			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
 			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);
@@ -435,8 +435,8 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
 			 * pessimistic, which shouldn't pose any significant
 			 * problem anyway.
 			 */
-			for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
-				crtc_bw->used_bw[slice_id] += data_rate;
+			for_each_dbuf_slice_in_mask(dev_priv, slice, dbuf_mask)
+				crtc_bw->used_bw[slice] += data_rate;
 		}
 	}
 
@@ -445,10 +445,11 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
 
 	for_each_pipe(dev_priv, pipe) {
 		struct intel_dbuf_bw *crtc_bw;
+		enum dbuf_slice slice;
 
 		crtc_bw = &new_bw_state->dbuf_bw[pipe];
 
-		for_each_dbuf_slice(slice_id) {
+		for_each_dbuf_slice(dev_priv, slice) {
 			/*
 			 * Current experimental observations show that contrary
 			 * to BSpec we get underruns once we exceed 64 * CDCLK
@@ -457,7 +458,7 @@ int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
 			 * bumped up all the time we calculate CDCLK according
 			 * to this formula for overall bw consumed by slices.
 			 */
-			max_bw += crtc_bw->used_bw[slice_id];
+			max_bw += crtc_bw->used_bw[slice];
 		}
 	}
 
@@ -188,12 +188,13 @@ enum plane_id {
 	for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
 		for_each_if((__crtc)->plane_ids_mask & BIT(__p))
 
-#define for_each_dbuf_slice_in_mask(__slice, __mask) \
+#define for_each_dbuf_slice(__dev_priv, __slice) \
 	for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
-		for_each_if((BIT(__slice)) & (__mask))
+		for_each_if(INTEL_INFO(__dev_priv)->dbuf.slice_mask & BIT(__slice))
 
-#define for_each_dbuf_slice(__slice) \
-	for_each_dbuf_slice_in_mask(__slice, BIT(I915_MAX_DBUF_SLICES) - 1)
+#define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \
+	for_each_dbuf_slice((__dev_priv), (__slice)) \
+		for_each_if((__mask) & BIT(__slice))
 
 enum port {
 	PORT_NONE = -1,
@@ -4772,13 +4772,13 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
 void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
 			     u8 req_slices)
 {
-	int num_slices = intel_dbuf_num_slices(dev_priv);
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
 	enum dbuf_slice slice;
 
-	drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
-		 "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
-		 req_slices, num_slices);
+	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
+		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
+		 req_slices, slice_mask);
 
 	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
 		    req_slices);
@@ -4792,7 +4792,7 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
 	 */
 	mutex_lock(&power_domains->lock);
 
-	for (slice = DBUF_S1; slice < num_slices; slice++)
+	for_each_dbuf_slice(dev_priv, slice)
 		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
 
 	dev_priv->dbuf.enabled_slices = req_slices;
@@ -4820,10 +4820,9 @@ static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
 
 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
 {
-	int num_slices = intel_dbuf_num_slices(dev_priv);
 	enum dbuf_slice slice;
 
-	for (slice = DBUF_S1; slice < (DBUF_S1 + num_slices); slice++)
+	for_each_dbuf_slice(dev_priv, slice)
 		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
 			     DBUF_TRACKER_STATE_SERVICE_MASK,
 			     DBUF_TRACKER_STATE_SERVICE(8));
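As an aside on the gen9_dbuf_slices_update() change above: the warning now checks the requested slices against the platform's slice mask rather than a count-derived mask. A tiny standalone sketch of that subset check, with invented values (illustrative only, not i915 code):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* A requested slice set is invalid if it contains bits outside the hw mask. */
static int invalid_slices(unsigned int req_slices, unsigned int hw_slice_mask)
{
	return (req_slices & ~hw_slice_mask) != 0;
}

int main(void)
{
	unsigned int hw_slice_mask = BIT(0) | BIT(1); /* hypothetical: two slices */

	printf("req 0x3 invalid? %d\n", invalid_slices(0x3, hw_slice_mask)); /* 0: subset, ok */
	printf("req 0x4 invalid? %d\n", invalid_slices(0x4, hw_slice_mask)); /* 1: slice not present */

	return 0;
}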
@@ -3636,16 +3636,16 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
 
 u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
 {
-	int i;
-	int num_slices = intel_dbuf_num_slices(dev_priv);
-	u8 enabled_slices_mask = 0;
+	u8 enabled_slices = 0;
+	enum dbuf_slice slice;
 
-	for (i = 0; i < num_slices; i++) {
-		if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE)
-			enabled_slices_mask |= BIT(i);
+	for_each_dbuf_slice(dev_priv, slice) {
+		if (intel_uncore_read(&dev_priv->uncore,
+				      DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+			enabled_slices |= BIT(slice);
 	}
 
-	return enabled_slices_mask;
+	return enabled_slices;
 }
 
 /*
@@ -4028,20 +4028,10 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
 	return 0;
 }
 
-static int intel_dbuf_size(struct drm_i915_private *dev_priv)
-{
-	return INTEL_INFO(dev_priv)->dbuf.size;
-}
-
-int intel_dbuf_num_slices(struct drm_i915_private *dev_priv)
-{
-	return hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
-}
-
 static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
 {
-	return intel_dbuf_size(dev_priv) /
-		intel_dbuf_num_slices(dev_priv);
+	return INTEL_INFO(dev_priv)->dbuf.size /
+		hweight8(INTEL_INFO(dev_priv)->dbuf.slice_mask);
 }
 
 static void
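With intel_dbuf_size() and intel_dbuf_num_slices() gone, the per-slice size above boils down to dbuf.size divided by the number of bits set in the slice mask. A quick standalone illustration of that arithmetic with invented numbers (the popcount helper just stands in for the kernel's hweight8()):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Population count in the spirit of hweight8(); illustrative stand-in. */
static unsigned int popcount_sketch(unsigned int mask)
{
	unsigned int count = 0;

	for (; mask; mask &= mask - 1) /* clear lowest set bit each pass */
		count++;
	return count;
}

int main(void)
{
	/* Made-up numbers: a 2048-block dbuf split across two slices. */
	unsigned int dbuf_size = 2048;
	unsigned int slice_mask = BIT(0) | BIT(1);

	printf("slice size = %u blocks\n", dbuf_size / popcount_sketch(slice_mask)); /* 1024 */

	return 0;
}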
@@ -4060,7 +4050,7 @@ skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
 	ddb->end = fls(slice_mask) * slice_size;
 
 	WARN_ON(ddb->start >= ddb->end);
-	WARN_ON(ddb->end > intel_dbuf_size(dev_priv));
+	WARN_ON(ddb->end > INTEL_INFO(dev_priv)->dbuf.size);
 }
 
 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
@@ -5820,10 +5810,10 @@ skl_compute_ddb(struct intel_atomic_state *state)
 			return ret;
 
 		drm_dbg_kms(&dev_priv->drm,
-			    "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
+			    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x)\n",
 			    old_dbuf_state->enabled_slices,
 			    new_dbuf_state->enabled_slices,
-			    intel_dbuf_num_slices(dev_priv));
+			    INTEL_INFO(dev_priv)->dbuf.slice_mask);
 	}
 
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -38,7 +38,6 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
 u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
-int intel_dbuf_num_slices(struct drm_i915_private *dev_priv);
 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 			       struct skl_ddb_entry *ddb_y,
 			       struct skl_ddb_entry *ddb_uv);