drm/i915: Move sandybridge pcode access to intel_sideband.c
The sandybridge pcode mailbox is another sideband interface, so move it to its new home in intel_sideband.c.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190426081725.31217-8-chris@chris-wilson.co.uk
commit e0516e8364
parent 063203c013
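For orientation, here is a minimal caller-side sketch (hypothetical, not part of this patch): after the move, code that talks to the pcode mailbox includes intel_sideband.h instead of relying on the declarations in i915_drv.h, and existing call sites keep the same function signatures. The mailbox ID is an existing one from i915_reg.h, used purely for illustration.

#include "intel_sideband.h"

/* Hypothetical caller: read the RC6 voltage IDs via the pcode mailbox. */
static int example_read_rc6vids(struct drm_i915_private *i915, u32 *rc6vids)
{
        return sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, rc6vids);
}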
@@ -3420,16 +3420,6 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                             struct intel_display_error_state *error);
 
-int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
-                                    u32 val, int fast_timeout_us,
-                                    int slow_timeout_ms);
-#define sandybridge_pcode_write(dev_priv, mbox, val) \
-        sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
-
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
-                      u32 reply_mask, u32 reply, int timeout_base_ms);
-
 /* intel_dpio_phy.c */
 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
                              enum dpio_phy *phy, enum dpio_channel *ch);
@@ -16,6 +16,7 @@
 #include "i915_reg.h"
 #include "intel_drv.h"
 #include "intel_hdcp.h"
+#include "intel_sideband.h"
 
 #define KEY_LOAD_TRIES 5
 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
@@ -9704,201 +9704,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
         }
 }
 
-static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv,
-                                            u32 mbox)
-{
-        switch (mbox & GEN6_PCODE_ERROR_MASK) {
-        case GEN6_PCODE_SUCCESS:
-                return 0;
-        case GEN6_PCODE_UNIMPLEMENTED_CMD:
-                return -ENODEV;
-        case GEN6_PCODE_ILLEGAL_CMD:
-                return -ENXIO;
-        case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-        case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-                return -EOVERFLOW;
-        case GEN6_PCODE_TIMEOUT:
-                return -ETIMEDOUT;
-        default:
-                MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
-                return 0;
-        }
-}
-
-static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv,
-                                            u32 mbox)
-{
-        switch (mbox & GEN6_PCODE_ERROR_MASK) {
-        case GEN6_PCODE_SUCCESS:
-                return 0;
-        case GEN6_PCODE_ILLEGAL_CMD:
-                return -ENXIO;
-        case GEN7_PCODE_TIMEOUT:
-                return -ETIMEDOUT;
-        case GEN7_PCODE_ILLEGAL_DATA:
-                return -EINVAL;
-        case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
-                return -EOVERFLOW;
-        default:
-                MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
-                return 0;
-        }
-}
-
-static int __sandybridge_pcode_rw(struct drm_i915_private *dev_priv,
-                                  u32 mbox, u32 *val,
-                                  int fast_timeout_us,
-                                  int slow_timeout_ms,
-                                  bool is_read)
-{
-        lockdep_assert_held(&dev_priv->sb_lock);
-
-        /*
-         * GEN6_PCODE_* are outside of the forcewake domain, we can
-         * use the fw I915_READ variants to reduce the amount of work
-         * required when reading/writing.
-         */
-
-        if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
-                return -EAGAIN;
-
-        I915_WRITE_FW(GEN6_PCODE_DATA, *val);
-        I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
-        I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
-        if (__intel_wait_for_register_fw(&dev_priv->uncore,
-                                         GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-                                         fast_timeout_us,
-                                         slow_timeout_ms,
-                                         &mbox))
-                return -ETIMEDOUT;
-
-        if (is_read)
-                *val = I915_READ_FW(GEN6_PCODE_DATA);
-
-        if (INTEL_GEN(dev_priv) > 6)
-                return gen7_check_mailbox_status(dev_priv, mbox);
-        else
-                return gen6_check_mailbox_status(dev_priv, mbox);
-}
-
-int
-sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
-{
-        int err;
-
-        mutex_lock(&dev_priv->sb_lock);
-        err = __sandybridge_pcode_rw(dev_priv, mbox, val,
-                                     500, 0,
-                                     true);
-        mutex_unlock(&dev_priv->sb_lock);
-
-        if (err) {
-                DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
-                                 mbox, __builtin_return_address(0), err);
-        }
-
-        return err;
-}
-
-int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
-                                    u32 mbox, u32 val,
-                                    int fast_timeout_us,
-                                    int slow_timeout_ms)
-{
-        int err;
-
-        mutex_lock(&dev_priv->sb_lock);
-        err = __sandybridge_pcode_rw(dev_priv, mbox, &val,
-                                     fast_timeout_us, slow_timeout_ms,
-                                     false);
-        mutex_unlock(&dev_priv->sb_lock);
-
-        if (err) {
-                DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
-                                 val, mbox, __builtin_return_address(0), err);
-        }
-
-        return err;
-}
-
-static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
-                                  u32 request, u32 reply_mask, u32 reply,
-                                  u32 *status)
-{
-        *status = __sandybridge_pcode_rw(dev_priv, mbox, &request,
-                                         500, 0,
-                                         true);
-
-        return *status || ((request & reply_mask) == reply);
-}
-
-/**
- * skl_pcode_request - send PCODE request until acknowledgment
- * @dev_priv: device private
- * @mbox: PCODE mailbox ID the request is targeted for
- * @request: request ID
- * @reply_mask: mask used to check for request acknowledgment
- * @reply: value used to check for request acknowledgment
- * @timeout_base_ms: timeout for polling with preemption enabled
- *
- * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
- * The request is acknowledged once the PCODE reply dword equals @reply after
- * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 50 ms with
- * preemption disabled.
- *
- * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
- * other error as reported by PCODE.
- */
-int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
-                      u32 reply_mask, u32 reply, int timeout_base_ms)
-{
-        u32 status;
-        int ret;
-
-        mutex_lock(&dev_priv->sb_lock);
-
-#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
-                                   &status)
-
-        /*
-         * Prime the PCODE by doing a request first. Normally it guarantees
-         * that a subsequent request, at most @timeout_base_ms later, succeeds.
-         * _wait_for() doesn't guarantee when its passed condition is evaluated
-         * first, so send the first request explicitly.
-         */
-        if (COND) {
-                ret = 0;
-                goto out;
-        }
-        ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
-        if (!ret)
-                goto out;
-
-        /*
-         * The above can time out if the number of requests was low (2 in the
-         * worst case) _and_ PCODE was busy for some reason even after a
-         * (queued) request and @timeout_base_ms delay. As a workaround retry
-         * the poll with preemption disabled to maximize the number of
-         * requests. Increase the timeout from @timeout_base_ms to 50ms to
-         * account for interrupts that could reduce the number of these
-         * requests, and for any quirks of the PCODE firmware that delay
-         * the request completion.
-         */
-        DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
-        WARN_ON_ONCE(timeout_base_ms > 3);
-        preempt_disable();
-        ret = wait_for_atomic(COND, 50);
-        preempt_enable();
-
-out:
-        mutex_unlock(&dev_priv->sb_lock);
-        return ret ? ret : status;
-#undef COND
-}
-
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
         struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -333,3 +333,199 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
 {
         intel_sbi_rw(i915, reg, destination, &value, false);
 }
+
+static inline int gen6_check_mailbox_status(u32 mbox)
+{
+        switch (mbox & GEN6_PCODE_ERROR_MASK) {
+        case GEN6_PCODE_SUCCESS:
+                return 0;
+        case GEN6_PCODE_UNIMPLEMENTED_CMD:
+                return -ENODEV;
+        case GEN6_PCODE_ILLEGAL_CMD:
+                return -ENXIO;
+        case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+        case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+                return -EOVERFLOW;
+        case GEN6_PCODE_TIMEOUT:
+                return -ETIMEDOUT;
+        default:
+                MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+                return 0;
+        }
+}
+
+static inline int gen7_check_mailbox_status(u32 mbox)
+{
+        switch (mbox & GEN6_PCODE_ERROR_MASK) {
+        case GEN6_PCODE_SUCCESS:
+                return 0;
+        case GEN6_PCODE_ILLEGAL_CMD:
+                return -ENXIO;
+        case GEN7_PCODE_TIMEOUT:
+                return -ETIMEDOUT;
+        case GEN7_PCODE_ILLEGAL_DATA:
+                return -EINVAL;
+        case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+                return -EOVERFLOW;
+        default:
+                MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+                return 0;
+        }
+}
+
+static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
+                                  u32 mbox, u32 *val,
+                                  int fast_timeout_us,
+                                  int slow_timeout_ms,
+                                  bool is_read)
+{
+        struct intel_uncore *uncore = &i915->uncore;
+
+        lockdep_assert_held(&i915->sb_lock);
+
+        /*
+         * GEN6_PCODE_* are outside of the forcewake domain, we can
+         * use the fw I915_READ variants to reduce the amount of work
+         * required when reading/writing.
+         */
+
+        if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+                return -EAGAIN;
+
+        intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
+        intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, 0);
+        intel_uncore_write_fw(uncore,
+                              GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+        if (__intel_wait_for_register_fw(uncore,
+                                         GEN6_PCODE_MAILBOX,
+                                         GEN6_PCODE_READY, 0,
+                                         fast_timeout_us,
+                                         slow_timeout_ms,
+                                         &mbox))
+                return -ETIMEDOUT;
+
+        if (is_read)
+                *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
+
+        if (INTEL_GEN(i915) > 6)
+                return gen7_check_mailbox_status(mbox);
+        else
+                return gen6_check_mailbox_status(mbox);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val)
+{
+        int err;
+
+        mutex_lock(&i915->sb_lock);
+        err = __sandybridge_pcode_rw(i915, mbox, val,
+                                     500, 0,
+                                     true);
+        mutex_unlock(&i915->sb_lock);
+
+        if (err) {
+                DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+                                 mbox, __builtin_return_address(0), err);
+        }
+
+        return err;
+}
+
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
+                                    u32 mbox, u32 val,
+                                    int fast_timeout_us,
+                                    int slow_timeout_ms)
+{
+        int err;
+
+        mutex_lock(&i915->sb_lock);
+        err = __sandybridge_pcode_rw(i915, mbox, &val,
+                                     fast_timeout_us, slow_timeout_ms,
+                                     false);
+        mutex_unlock(&i915->sb_lock);
+
+        if (err) {
+                DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+                                 val, mbox, __builtin_return_address(0), err);
+        }
+
+        return err;
+}
+
+static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+                                  u32 request, u32 reply_mask, u32 reply,
+                                  u32 *status)
+{
+        *status = __sandybridge_pcode_rw(i915, mbox, &request,
+                                         500, 0,
+                                         true);
+
+        return *status || ((request & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @i915: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and if this times out for another 50 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+                      u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+        u32 status;
+        int ret;
+
+        mutex_lock(&i915->sb_lock);
+
+#define COND \
+        skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+
+        /*
+         * Prime the PCODE by doing a request first. Normally it guarantees
+         * that a subsequent request, at most @timeout_base_ms later, succeeds.
+         * _wait_for() doesn't guarantee when its passed condition is evaluated
+         * first, so send the first request explicitly.
+         */
+        if (COND) {
+                ret = 0;
+                goto out;
+        }
+        ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
+        if (!ret)
+                goto out;
+
+        /*
+         * The above can time out if the number of requests was low (2 in the
+         * worst case) _and_ PCODE was busy for some reason even after a
+         * (queued) request and @timeout_base_ms delay. As a workaround retry
+         * the poll with preemption disabled to maximize the number of
+         * requests. Increase the timeout from @timeout_base_ms to 50ms to
+         * account for interrupts that could reduce the number of these
+         * requests, and for any quirks of the PCODE firmware that delay
+         * the request completion.
+         */
+        DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+        WARN_ON_ONCE(timeout_base_ms > 3);
+        preempt_disable();
+        ret = wait_for_atomic(COND, 50);
+        preempt_enable();
+
+out:
+        mutex_unlock(&i915->sb_lock);
+        return ret ? ret : status;
+#undef COND
+}
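As a usage note (hypothetical, not part of the diff): the kerneldoc above spells out the retry protocol, so a caller only supplies the mailbox, the request, and the reply mask/value to poll for. The sketch below is modeled on the existing cdclk code and assumes the SKL_PCODE_CDCLK_CONTROL / SKL_CDCLK_* constants from i915_reg.h.

/*
 * Hypothetical caller: ask pcode to prepare for a cdclk change and keep
 * polling until the masked reply dword reports readiness or the request
 * times out (3 ms base, plus the 50 ms atomic retry described above).
 */
static int example_prepare_cdclk_change(struct drm_i915_private *i915)
{
        return skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
                                 SKL_CDCLK_PREPARE_FOR_CHANGE,
                                 SKL_CDCLK_READY_FOR_CHANGE,
                                 SKL_CDCLK_READY_FOR_CHANGE, 3);
}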
@@ -127,4 +127,14 @@ u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
 void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
                      enum intel_sbi_destination destination);
 
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
+                                    u32 val, int fast_timeout_us,
+                                    int slow_timeout_ms);
+#define sandybridge_pcode_write(i915, mbox, val) \
+        sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
+
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+                      u32 reply_mask, u32 reply, int timeout_base_ms);
+
 #endif /* _INTEL_SIDEBAND_H */
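One last usage note (hypothetical, not part of the diff): the header keeps the sandybridge_pcode_write() convenience macro, which expands to a 500 µs busy wait with no sleeping wait, while callers that need a longer mailbox wait call sandybridge_pcode_write_timeout() directly. The mailbox ID and the 150 ms value below are purely illustrative.

static int example_pcode_writes(struct drm_i915_private *i915, u32 mbox, u32 val)
{
        int err;

        /* Default path: 500 us busy wait, no sleeping wait. */
        err = sandybridge_pcode_write(i915, mbox, val);
        if (err)
                return err;

        /* Explicit timeouts: 500 us busy wait, then up to 150 ms sleeping. */
        return sandybridge_pcode_write_timeout(i915, mbox, val, 500, 150);
}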