Merge tag 'drm-intel-fixes-2019-12-19' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix to drop an unused and harmful display workaround (W/A)
- Fix to define EHL power wells independently of ICL
- Fix for a priority inversion on bonded requests
- Fix for the MMIO offset calculation of the DSB instance
- Fix for a memory leak from get_task_pid() when banning clients
- Fixes to avoid dereferencing uninitialized ops in dma_fence tracing
  and to keep a reference to the execbuf request until it is submitted

- Includes gvt-fixes-2019-12-18

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191219124635.GA16068@jlahtine-desk.ger.corp.intel.com
Dave Airlie 2019-12-21 06:08:19 +10:00
commit 0c517e6ced
14 changed files with 365 additions and 45 deletions

drivers/gpu/drm/i915/display/intel_display_power.c

@@ -3688,6 +3688,151 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
};
static const struct i915_power_well_desc ehl_power_wells[] = {
{
.name = "always-on",
.always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_1,
.hsw.has_fuses = true,
},
},
{
.name = "DC off",
.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
.id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
.domains = ICL_PW_2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_2,
.hsw.has_fuses = true,
},
},
{
.name = "power well 3",
.domains = ICL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
.hsw.irq_pipe_mask = BIT(PIPE_B),
.hsw.has_vga = true,
.hsw.has_fuses = true,
},
},
{
.name = "DDI A IO",
.domains = ICL_DDI_IO_A_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
},
},
{
.name = "DDI B IO",
.domains = ICL_DDI_IO_B_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
},
},
{
.name = "DDI C IO",
.domains = ICL_DDI_IO_C_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
},
},
{
.name = "DDI D IO",
.domains = ICL_DDI_IO_D_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_ddi_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
},
},
{
.name = "AUX A",
.domains = ICL_AUX_A_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
},
},
{
.name = "AUX B",
.domains = ICL_AUX_B_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
},
},
{
.name = "AUX C",
.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
},
},
{
.name = "AUX D",
.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
},
},
{
.name = "power well 4",
.domains = ICL_PW_4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_4,
.hsw.has_fuses = true,
.hsw.irq_pipe_mask = BIT(PIPE_C),
},
},
};
static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "always-on",
@@ -3832,7 +3977,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX A",
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3842,7 +3987,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX B",
.domains = TGL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3852,7 +3997,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX C",
.domains = TGL_AUX_C_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4162,6 +4307,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_GEN(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
} else if (IS_ELKHARTLAKE(dev_priv)) {
err = set_power_wells(power_domains, ehl_power_wells);
} else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
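The new ehl_power_wells[] table is a trimmed copy of the ICL list: EHL's display lacks the type-C/TBT ports, so the TBT AUX and DDI E/F wells that icl_power_wells carries are gone. The dispatch hunk just above matters as much as the table: EHL reports as gen 11, so IS_ELKHARTLAKE() must be checked before the generic IS_GEN(dev_priv, 11) branch, or EHL would keep matching the ICL case. A minimal userspace sketch of that pattern (illustrative names, not the driver's API):

/*
 * Each platform gets its own array of power-well descriptors, and the
 * most specific platform check must run before the generic gen check.
 */
#include <stdio.h>

struct power_well_desc { const char *name; };

static const struct power_well_desc icl_wells[] = { { "PW_1" }, { "AUX TBT1" } };
static const struct power_well_desc ehl_wells[] = { { "PW_1" } }; /* no TBT */

static const struct power_well_desc *pick_wells(int gen, int is_ehl, int *n)
{
	/* EHL is itself gen 11, so test it before the generic gen-11 case. */
	if (is_ehl) {
		*n = 1;
		return ehl_wells;
	}
	if (gen == 11) {
		*n = 2;
		return icl_wells;
	}
	*n = 0;
	return NULL;
}

int main(void)
{
	int n;
	const struct power_well_desc *w = pick_wells(11, 1, &n);

	printf("%d wells, first: %s\n", n, w[0].name);
	return 0;
}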

drivers/gpu/drm/i915/gem/i915_gem_context.c

@@ -2167,8 +2167,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
ext_data.fpriv = file->driver_priv;
if (client_is_banned(ext_data.fpriv)) {
DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm,
- pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ current->comm, task_pid_nr(current));
return -EIO;
}
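The leak: get_task_pid() returns a struct pid holding a new reference that the caller must drop with put_pid(), and the debug message took that reference on every banned-client hit without ever releasing it. task_pid_nr() reads the numeric id without touching the refcount. A toy model of the difference (simplified types, not the kernel's struct pid):

#include <stdio.h>

struct pid_obj { int nr; int refcount; };

static struct pid_obj *get_pid_ref(struct pid_obj *p)
{
	p->refcount++;	/* caller now owns a reference */
	return p;
}

int main(void)
{
	struct pid_obj task_pid = { .nr = 1234, .refcount = 1 };

	/* Buggy pattern: reference taken, never put, so one leak per call. */
	int leaky = get_pid_ref(&task_pid)->nr;

	/* Fixed pattern, as task_pid_nr() does: no reference taken. */
	int fixed = task_pid.nr;

	printf("%d %d refcount now %d (one ref leaked)\n",
	       leaky, fixed, task_pid.refcount);
	return 0;
}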

drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c

@@ -2694,6 +2694,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
err = eb_submit(&eb);
err_request:
add_to_client(eb.request, file);
i915_request_get(eb.request);
i915_request_add(eb.request);
if (fences)
@@ -2709,6 +2710,7 @@ err_request:
fput(out_fence->file);
}
}
i915_request_put(eb.request);
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
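Both hunks add one line each: i915_request_add() hands the request over for execution, after which it can be retired and freed at any moment, yet the out-fence code that follows still dereferences eb.request. Bracketing the handoff with i915_request_get()/i915_request_put() keeps the request alive until the fence bookkeeping is done. A toy model of the hold-across-handoff pattern (simplified refcounting, not the i915 API):

#include <stdio.h>
#include <stdlib.h>

struct request { int refcount; int fence; };

static void request_get(struct request *rq) { rq->refcount++; }

static void request_put(struct request *rq)
{
	if (--rq->refcount == 0)
		free(rq);
}

/* Models i915_request_add(): ownership moves to the scheduler, which
 * may drop its reference (retire the request) at any time after this. */
static void request_add(struct request *rq) { request_put(rq); }

int main(void)
{
	struct request *rq = calloc(1, sizeof(*rq));

	rq->refcount = 1;
	rq->fence = 42;

	request_get(rq);	/* pin across the handoff */
	request_add(rq);	/* may retire asynchronously */
	printf("fence still valid: %d\n", rq->fence);
	request_put(rq);	/* now it may really go away */
	return 0;
}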

drivers/gpu/drm/i915/gvt/dmabuf.c

@@ -36,13 +36,32 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
unsigned long size,
dma_addr_t dma_addr)
{
int ret = 0;
if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
ret = -EINVAL;
return ret;
}
static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_vgpu *vgpu;
struct sg_table *st;
struct scatterlist *sg;
- int i, ret;
+ int i, j, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
u32 page_num;
@@ -51,6 +70,10 @@ static int vgpu_gem_get_pages(
if (WARN_ON(!fb_info))
return -ENODEV;
vgpu = fb_info->obj->vgpu;
if (WARN_ON(!vgpu))
return -ENODEV;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (unlikely(!st))
return -ENOMEM;
@@ -64,21 +87,53 @@ static int vgpu_gem_get_pages(
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
for_each_sg(st->sgl, sg, page_num, i) {
dma_addr_t dma_addr =
GEN8_DECODE_PTE(readq(&gtt_entries[i]));
if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
ret = -EINVAL;
goto out;
}
sg->offset = 0;
sg->length = PAGE_SIZE;
- sg_dma_address(sg) =
- GEN8_DECODE_PTE(readq(&gtt_entries[i]));
sg_dma_len(sg) = PAGE_SIZE;
+ sg_dma_address(sg) = dma_addr;
}
__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
if (ret) {
dma_addr_t dma_addr;
for_each_sg(st->sgl, sg, i, j) {
dma_addr = sg_dma_address(sg);
if (dma_addr)
vgpu_unpin_dma_address(vgpu, dma_addr);
}
sg_free_table(st);
kfree(st);
}
- return 0;
+ return ret;
}
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct scatterlist *sg;
if (obj->base.dma_buf) {
struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
struct intel_vgpu *vgpu = obj->vgpu;
int i;
for_each_sg(pages->sgl, sg, fb_info->size, i)
vgpu_unpin_dma_address(vgpu,
sg_dma_address(sg));
}
sg_free_table(pages);
kfree(pages);
}
@@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base,
roundup(info->size, PAGE_SIZE));
i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
i915_gem_object_set_readonly(obj);
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
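The shape of the error path in vgpu_gem_get_pages() is worth noting: if pinning fails partway through the scatterlist, only the i entries pinned so far are unwound, by using i rather than page_num as the for_each_sg() bound. A self-contained sketch of the same unwind pattern (pin_one()/unpin_one() are hypothetical stand-ins):

#include <stdio.h>

static int pin_one(int idx) { return idx == 3 ? -1 : 0; } /* fail at 3 */
static void unpin_one(int idx) { printf("unpin %d\n", idx); }

static int pin_all(int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		if (pin_one(i)) {
			ret = -1;
			goto out;
		}
	}
out:
	if (ret) {
		while (i--)	/* exactly i entries were pinned */
			unpin_one(i);
	}
	return ret;
}

int main(void)
{
	printf("pin_all: %d\n", pin_all(8)); /* unpins 2, 1, 0 */
	return 0;
}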

drivers/gpu/drm/i915/gvt/handlers.c

@@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
engine_mask |= BIT(VCS1);
}
if (data & GEN9_GRDOM_GUC) {
gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
}
engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
@@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
static int guc_status_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
{
/* keep MIA_IN_RESET before clearing */
read_vreg(vgpu, offset, p_data, bytes);
vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
return 0;
}
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@@ -2672,6 +2686,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
return 0;
}
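A vGPU has no real GuC behind it, so a guest-requested GuC reset would otherwise never appear to finish. The emulation latches GS_MIA_IN_RESET into the virtual GUC_STATUS when the reset is requested and clears it once the guest reads the register back, i.e. read-to-clear semantics. A minimal sketch (toy register file, illustrative bit position):

#include <stdio.h>
#include <stdint.h>

#define GS_MIA_IN_RESET (1u << 0)	/* illustrative bit position */

static uint32_t vreg_guc_status;

static void emulate_guc_reset(void) { vreg_guc_status |= GS_MIA_IN_RESET; }

static uint32_t guc_status_read(void)
{
	uint32_t val = vreg_guc_status;		/* report MIA_IN_RESET once */
	vreg_guc_status &= ~GS_MIA_IN_RESET;	/* then clear it */
	return val;
}

int main(void)
{
	emulate_guc_reset();
	printf("first read:  %#x\n", guc_status_read()); /* bit set */
	printf("second read: %#x\n", guc_status_read()); /* bit clear */
	return 0;
}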

drivers/gpu/drm/i915/gvt/hypercall.h

@@ -62,6 +62,8 @@ struct intel_gvt_mpt {
unsigned long size, dma_addr_t *dma_addr);
void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,

drivers/gpu/drm/i915/gvt/kvmgt.c

@@ -1916,6 +1916,28 @@ err_unlock:
return ret;
}
static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
struct kvmgt_guest_info *info;
struct gvt_dma *entry;
int ret = 0;
if (!handle_valid(handle))
return -ENODEV;
info = (struct kvmgt_guest_info *)handle;
mutex_lock(&info->vgpu->vdev.cache_lock);
entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
if (entry)
kref_get(&entry->ref);
else
ret = -ENOMEM;
mutex_unlock(&info->vgpu->vdev.cache_lock);
return ret;
}
static void __gvt_dma_release(struct kref *ref)
{
struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
@@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
.gfn_to_mfn = kvmgt_gfn_to_pfn,
.dma_map_guest_page = kvmgt_dma_map_guest_page,
.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
.set_opregion = kvmgt_set_opregion,
.set_edid = kvmgt_set_edid,
.get_vfio_device = kvmgt_get_vfio_device,
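"Pinning" here is a kref_get() on the existing gvt_dma cache entry, so the DMA mapping cannot be unmapped while the display path still uses the address. Lookup and kref_get() both happen under cache_lock, closing the race with release. A toy version of lookup-then-get-under-lock (pthread mutex and a flat array standing in for vdev.cache_lock and the rb-tree cache):

#include <pthread.h>
#include <stdio.h>

struct entry { unsigned long dma_addr; int ref; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry cache[4] = { { 0x1000, 1 } };

static int pin_dma(unsigned long dma_addr)
{
	int i, ret = -1;

	pthread_mutex_lock(&cache_lock);
	for (i = 0; i < 4; i++) {
		if (cache[i].dma_addr == dma_addr) {
			cache[i].ref++;	/* kref_get: entry now pinned */
			ret = 0;
			break;
		}
	}
	pthread_mutex_unlock(&cache_lock);
	return ret;	/* -1 if not mapped: nothing to pin */
}

int main(void)
{
	printf("pin 0x1000: %d\n", pin_dma(0x1000));
	printf("pin 0x2000: %d\n", pin_dma(0x2000));
	return 0;
}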

drivers/gpu/drm/i915/gvt/mpt.h

@@ -254,6 +254,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
}
/**
* intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
* @vgpu: a vGPU
* @dma_addr: guest dma addr
*
* Returns:
* 0 on success, negative error code if failed.
*/
static inline int
intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
}
/**
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
* @vgpu: a vGPU

drivers/gpu/drm/i915/gvt/vgpu.c

@@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
*/
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
{
- mutex_lock(&vgpu->gvt->lock);
+ mutex_lock(&vgpu->vgpu_lock);
vgpu->active = true;
- mutex_unlock(&vgpu->gvt->lock);
+ mutex_unlock(&vgpu->vgpu_lock);
}
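The readers of vgpu->active take the per-vGPU vgpu_lock, so flipping the flag under the global gvt->lock did not actually serialize against them; the fix simply takes the lock the readers use. A minimal illustration of that rule (pthread version, toy names):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t vgpu_lock = PTHREAD_MUTEX_INITIALIZER;
static bool active;

static void activate(void)
{
	pthread_mutex_lock(&vgpu_lock);	/* same lock the readers take */
	active = true;
	pthread_mutex_unlock(&vgpu_lock);
}

static bool is_active(void)
{
	bool ret;

	pthread_mutex_lock(&vgpu_lock);
	ret = active;
	pthread_mutex_unlock(&vgpu_lock);
	return ret;
}

int main(void)
{
	activate();
	printf("active: %d\n", is_active());
	return 0;
}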
/**

drivers/gpu/drm/i915/i915_reg.h

@@ -9405,11 +9405,9 @@ enum skl_power_gate {
#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
#define _ICL_AUX_ANAOVRD1_A 0x162398
#define _ICL_AUX_ANAOVRD1_B 0x6C398
- #define _TGL_AUX_ANAOVRD1_C 0x160398
#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
_ICL_AUX_ANAOVRD1_A, \
- _ICL_AUX_ANAOVRD1_B, \
- _TGL_AUX_ANAOVRD1_C))
+ _ICL_AUX_ANAOVRD1_B))
#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
@@ -11994,7 +11992,7 @@ enum skl_power_gate {
/* This register controls the Display State Buffer (DSB) engines. */
#define _DSBSL_INSTANCE_BASE 0x70B00
#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
- (pipe) * 0x1000 + (id) * 100)
+ (pipe) * 0x1000 + (id) * 0x100)
#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
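Two separate fixes here. The first hunk drops the TGL entry from the ICL_AUX_ANAOVRD1() _PICK() table: with the TGL AUX wells switched back to hsw_power_well_ops above, the LDO-bypass workaround register is no longer touched on TGL. The second fixes a decimal/hex mixup in the DSB stride: consecutive DSB instances of a pipe sit 0x100 bytes apart, not 100 (0x64). A self-contained check of the corrected math (base and strides taken from the patch):

#include <stdio.h>

#define DSBSL_INSTANCE_BASE 0x70B00
#define DSBSL_INSTANCE(pipe, id) \
	(DSBSL_INSTANCE_BASE + (pipe) * 0x1000 + (id) * 0x100)

int main(void)
{
	/* decimal 100 put instance 1 of pipe B at the wrong offset */
	printf("buggy : %#x\n", DSBSL_INSTANCE_BASE + 1 * 0x1000 + 1 * 100);
	printf("fixed : %#x\n", DSBSL_INSTANCE(1, 1));
	return 0;
}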

drivers/gpu/drm/i915/i915_request.c

@@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq)
}
static int
- __i915_request_await_execution(struct i915_request *rq,
- struct i915_request *signal,
- void (*hook)(struct i915_request *rq,
- struct dma_fence *signal),
- gfp_t gfp)
+ __await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal),
+ gfp_t gfp)
{
struct execute_cb *cb;
@@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq,
}
spin_unlock_irq(&signal->lock);
/* Copy across semaphore status as we need the same behaviour */
rq->sched.flags |= signal->sched.flags;
return 0;
}
@@ -811,31 +813,21 @@ already_busywaiting(struct i915_request *rq)
}
static int
- emit_semaphore_wait(struct i915_request *to,
- struct i915_request *from,
- gfp_t gfp)
+ __emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ u32 seqno)
{
const int has_token = INTEL_GEN(to->i915) >= 12;
u32 hwsp_offset;
- int len;
+ int len, err;
u32 *cs;
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
- /* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
- goto await_fence;
- if (i915_request_await_start(to, from) < 0)
- goto await_fence;
- /* Only submit our spinner after the signaler is running! */
- if (__i915_request_await_execution(to, from, NULL, gfp))
- goto await_fence;
/* We need to pin the signaler's HWSP until we are finished reading. */
- if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
- goto await_fence;
+ err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
+ if (err)
+ return err;
len = 4;
if (has_token)
@@ -858,7 +850,7 @@ emit_semaphore_wait(struct i915_request *to,
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD) +
has_token;
- *cs++ = from->fence.seqno;
+ *cs++ = seqno;
*cs++ = hwsp_offset;
*cs++ = 0;
if (has_token) {
@@ -867,6 +859,28 @@ emit_semaphore_wait(struct i915_request *to,
}
intel_ring_advance(to, cs);
return 0;
}
static int
emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
/* Just emit the first semaphore we see as request space is limited. */
if (already_busywaiting(to) & from->engine->mask)
goto await_fence;
if (i915_request_await_start(to, from) < 0)
goto await_fence;
/* Only submit our spinner after the signaler is running! */
if (__await_execution(to, from, NULL, gfp))
goto await_fence;
if (__emit_semaphore_wait(to, from, from->fence.seqno))
goto await_fence;
to->sched.semaphores |= from->engine->mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
@@ -980,6 +994,57 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
return 0;
}
static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
struct dma_fence *fence)
{
return __intel_timeline_sync_is_later(tl,
fence->context,
fence->seqno - 1);
}
static int intel_timeline_sync_set_start(struct intel_timeline *tl,
const struct dma_fence *fence)
{
return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
}
static int
__i915_request_await_execution(struct i915_request *to,
struct i915_request *from,
void (*hook)(struct i915_request *rq,
struct dma_fence *signal))
{
int err;
/* Submit both requests at the same time */
err = __await_execution(to, from, hook, I915_FENCE_GFP);
if (err)
return err;
/* Squash repeated dependencies to the same timelines */
if (intel_timeline_sync_has_start(i915_request_timeline(to),
&from->fence))
return 0;
/* Ensure both start together [after all semaphores in signal] */
if (intel_engine_has_semaphores(to->engine))
err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
else
err = i915_request_await_start(to, from);
if (err < 0)
return err;
/* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
err = i915_sched_node_add_dependency(&to->sched, &from->sched);
if (err < 0)
return err;
}
return intel_timeline_sync_set_start(i915_request_timeline(to),
&from->fence);
}
int
i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence,
@@ -1013,8 +1078,7 @@ i915_request_await_execution(struct i915_request *rq,
if (dma_fence_is_i915(fence))
ret = __i915_request_await_execution(rq,
to_request(fence),
- hook,
- I915_FENCE_GFP);
+ hook);
else
ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
I915_FENCE_TIMEOUT,
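The priority-inversion fix has two parts, both in the new __i915_request_await_execution(): the scheduler dependency is coupled explicitly via i915_sched_node_add_dependency(), so priority bumps on the waiter propagate to the signaler, and where the engine has semaphores the waiter busy-waits on the signaler's previous seqno rather than its own. Since seqnos on a timeline complete in order, seeing seqno - 1 in the HWSP means the request carrying seqno has started executing, which is exactly the "start together" condition bonded requests need. A small sketch of that comparison (wrap-safe, toy values):

#include <stdio.h>
#include <stdint.h>

/* On an ordered timeline, once the breadcrumb for seqno - 1 is
 * visible, the request with seqno has started. */
static int started(uint32_t hwsp_seqno, uint32_t seqno)
{
	return (int32_t)(hwsp_seqno - (seqno - 1)) >= 0;
}

int main(void)
{
	/* HWSP shows 41 completed: request 42 is running, 43 is not. */
	printf("42 started? %d\n", started(41, 42));
	printf("43 started? %d\n", started(41, 43));
	return 0;
}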

drivers/gpu/drm/i915/i915_scheduler.c

@@ -474,7 +474,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
* so we may be called out-of-order.
*/
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
- GEM_BUG_ON(!node_signaled(dep->signaler));
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->wait_link);

drivers/gpu/drm/i915/i915_sw_fence_work.c

@@ -78,12 +78,11 @@ static const struct dma_fence_ops fence_ops = {
void dma_fence_work_init(struct dma_fence_work *f,
const struct dma_fence_work_ops *ops)
{
+ f->ops = ops;
spin_lock_init(&f->lock);
dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
i915_sw_fence_init(&f->chain, fence_notify);
INIT_WORK(&f->work, fence_work);
- f->ops = ops;
}
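dma_fence_init() fires the dma_fence_init tracepoint, and this file's fence_ops resolve the timeline name through f->ops; with f->ops assigned last, an enabled tracepoint dereferenced an uninitialized pointer. Moving the assignment to the top closes that window. A toy model of the ordering hazard (simplified types, not the dma_fence API):

#include <stdio.h>

struct ops { const char *name; };
struct work { const struct ops *ops; };

static void trace_init(const struct work *w)
{
	/* With ops assigned after init, this read saw NULL. */
	printf("init: %s\n", w->ops ? w->ops->name : "(NULL ops: would oops)");
}

static void work_init(struct work *w) { trace_init(w); }

int main(void)
{
	static const struct ops my_ops = { .name = "fence-work" };
	struct work w = { 0 };

	w.ops = &my_ops;	/* the fix: assign before init */
	work_init(&w);
	return 0;
}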
int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)

drivers/gpu/drm/i915/intel_pm.c

@@ -4291,8 +4291,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
- if (WARN_ON(wm->wm[level].min_ddb_alloc >
- total[PLANE_CURSOR])) {
+ if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+ WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
}