Merge tag 'drm-intel-next-2016-01-24' of git://anongit.freedesktop.org/drm-intel into drm-next

- support for v3 vbt dsi blocks (Jani)
- improve mmio debug checks (Mika Kuoppala)
- reorg the ddi port translation table entries and related code (Ville)
- reorg gen8 interrupt handling for future platforms (Tvrtko)
- refactor tile width/height computations for framebuffers (Ville)
- kerneldoc integration for intel_pm.c (Jani)
- move default context from engines to device-global dev_priv (Dave Gordon)
- make seqno/irq ordering coherent with execlist (Chris)
- decouple internal engine number from UABI (Chris&Tvrtko)
- tons of small fixes all over, as usual

* tag 'drm-intel-next-2016-01-24' of git://anongit.freedesktop.org/drm-intel: (148 commits)
  drm/i915: Update DRIVER_DATE to 20160124
  drm/i915: Seal busy-ioctl uABI and prevent leaking of internal ids
  drm/i915: Decouple execbuf uAPI from internal implementation
  drm/i915: Use ordered seqno write interrupt generation on gen8+ execlists
  drm/i915: Limit the auto arming of mmio debugs on vlv/chv
  drm/i915: Tune down "GT register while GT waking disabled" message
  drm/i915: tidy up a few leftovers
  drm/i915: abolish separate per-ring default_context pointers
  drm/i915: simplify allocation of driver-internal requests
  drm/i915: Fix NULL plane->fb oops on SKL
  drm/i915: Do not put big intel_crtc_state on the stack
  Revert "drm/i915: Add two-stage ILK-style watermark programming (v10)"
  drm/i915: add DOC: headline to RC6 kernel-doc
  drm/i915: turn some bogus kernel-doc comments to normal comments
  drm/i915/sdvo: revert bogus kernel-doc comments to normal comments
  drm/i915/gen9: Correct max save/restore register count during gpu reset with GuC
  drm/i915: Demote user facing DMC firmware load failure message
  drm/i915: use hlist_for_each_entry
  drm/i915: skl_update_scaler() wants a rotation bitmask instead of bit number
  drm/i915: Don't reject primary plane windowing with color keying enabled on SKL+
  ...
Merged by Dave Airlie on 2016-02-09 10:27:41 +10:00 in commit b039d6d025.
50 changed files with 2740 additions and 1887 deletions

View File

@@ -3319,6 +3319,12 @@ int num_ioctls;</synopsis>
!Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
!Idrivers/gpu/drm/i915/intel_csr.c
</sect2>
<sect2>
<title>Video BIOS Table (VBT)</title>
!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
!Idrivers/gpu/drm/i915/intel_bios.c
!Idrivers/gpu/drm/i915/intel_bios.h
</sect2>
</sect1>
<sect1>

View File

@@ -1331,7 +1331,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct intel_engine_cs *ring;
u64 acthd[I915_NUM_RINGS];
u32 seqno[I915_NUM_RINGS];
int i;
u32 instdone[I915_NUM_INSTDONE_REG];
int i, j;
if (!i915.enable_hangcheck) {
seq_printf(m, "Hangcheck disabled\n");
@@ -1345,6 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
acthd[i] = intel_ring_get_active_head(ring);
}
i915_get_extra_instdone(dev, instdone);
intel_runtime_pm_put(dev_priv);
if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
@@ -1365,6 +1368,21 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
(long long)ring->hangcheck.max_acthd);
seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
if (ring->id == RCS) {
seq_puts(m, "\tinstdone read =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x", instdone[j]);
seq_puts(m, "\n\tinstdone accu =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x",
ring->hangcheck.instdone[j]);
seq_puts(m, "\n");
}
}
return 0;
@@ -1942,11 +1960,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_puts(m, "HW context ");
describe_ctx(m, ctx);
for_each_ring(ring, dev_priv, i) {
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ",
ring->name);
}
if (ctx == dev_priv->kernel_context)
seq_printf(m, "(kernel context) ");
if (i915.enable_execlists) {
seq_putc(m, '\n');
@@ -1976,12 +1991,13 @@
}
static void i915_dump_lrc_obj(struct seq_file *m,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj)
struct intel_context *ctx,
struct intel_engine_cs *ring)
{
struct page *page;
uint32_t *reg_state;
int j;
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
unsigned long ggtt_offset = 0;
if (ctx_obj == NULL) {
@@ -1991,7 +2007,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
}
seq_printf(m, "CONTEXT: %s %u\n", ring->name,
intel_execlists_ctx_id(ctx_obj));
intel_execlists_ctx_id(ctx, ring));
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
@@ -2037,13 +2053,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
if (ret)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link) {
for_each_ring(ring, dev_priv, i) {
if (ring->default_context != ctx)
i915_dump_lrc_obj(m, ring,
ctx->engine[i].state);
}
}
list_for_each_entry(ctx, &dev_priv->context_list, link)
if (ctx != dev_priv->kernel_context)
for_each_ring(ring, dev_priv, i)
i915_dump_lrc_obj(m, ctx, ring);
mutex_unlock(&dev->struct_mutex);
@@ -2092,13 +2105,13 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
read_pointer = ring->next_context_status_buffer;
write_pointer = status_pointer & 0x07;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += 6;
write_pointer += GEN8_CSB_ENTRIES;
seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
read_pointer, write_pointer);
for (i = 0; i < 6; i++) {
for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
@@ -2115,11 +2128,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
struct drm_i915_gem_object *ctx_obj;
ctx_obj = head_req->ctx->engine[ring_id].state;
seq_printf(m, "\tHead request id: %u\n",
intel_execlists_ctx_id(ctx_obj));
intel_execlists_ctx_id(head_req->ctx, ring));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
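
For reference, the context status buffer (CSB) bookkeeping above can be modelled in isolation. GEN8_CSB_ENTRIES (6) and GEN8_CSB_WRITE_PTR() are the new definitions this hunk switches to; the pointer values in this sketch are made up:

#include <stdio.h>

#define GEN8_CSB_ENTRIES 6	/* matches the new i915_reg.h definition */

int main(void)
{
	/* Illustrative values for what i915_execlists() reads back:
	 * ring->next_context_status_buffer and GEN8_CSB_WRITE_PTR(status). */
	unsigned int read_pointer = 5;
	unsigned int write_pointer = 2;

	/* The write pointer wrapped past the read pointer; unwrap it so the
	 * number of pending entries is a plain difference. */
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

	printf("%u status entries pending\n", write_pointer - read_pointer);
	return 0;
}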

View File

@@ -1079,7 +1079,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
*/
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
intel_prepare_ddi(dev);
return 0;
}
@@ -1338,8 +1337,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
return 0;
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
wait_for_on ? "on" : "off",
I915_READ(VLV_GTLC_PW_STATUS));
onoff(wait_for_on),
I915_READ(VLV_GTLC_PW_STATUS));
/*
* RC6 transitioning can be delayed up to 2 msec (see
@@ -1348,7 +1347,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
err = wait_for(COND, 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
wait_for_on ? "on" : "off");
onoff(wait_for_on));
return err;
#undef COND
@@ -1359,7 +1358,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
return;
DRM_ERROR("GT register access while GT waking disabled\n");
DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
@@ -1503,6 +1502,10 @@ static int intel_runtime_suspend(struct device *device)
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
dev_priv->pm.suspended = true;
/*
@@ -1551,6 +1554,8 @@ static int intel_runtime_resume(struct device *device)
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
intel_guc_resume(dev);
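
The runtime suspend/resume hunks above split unclaimed-register handling into an arming call (report and clear) and a passive query; the declarations appear in i915_drv.h further down. A toy model of that split, assuming the detector reduces to a single claim bit (the real driver reads FPGA_DBG):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the hardware claim bit (FPGA_DBG_RM_NOCLAIM in the driver). */
static bool claim_bit = true;

/* Passive query, as used on runtime resume. */
static bool unclaimed_mmio(void)
{
	return claim_bit;
}

/* Report and re-arm, as used on runtime suspend and from hangcheck. */
static bool arm_unclaimed_mmio_detection(void)
{
	bool fired = unclaimed_mmio();

	claim_bit = false;	/* clear, so the next check sees only new accesses */
	return fired;
}

int main(void)
{
	if (arm_unclaimed_mmio_detection())
		printf("Unclaimed access detected prior to suspending\n");
	if (unclaimed_mmio())
		printf("Unclaimed access during suspend, bios?\n");
	else
		printf("no unclaimed access while suspended\n");
	return 0;
}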

View File

@@ -34,6 +34,7 @@
#include <uapi/drm/drm_fourcc.h>
#include <drm/drmP.h>
#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
@@ -58,7 +59,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20151218"
#define DRIVER_DATE "20160124"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -69,11 +70,11 @@
BUILD_BUG_ON(__i915_warn_cond); \
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif
#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(long) (x), __func__);
@@ -87,31 +88,25 @@
*/
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
WARN(1, format); \
else \
if (unlikely(__ret_warn_on)) \
if (!WARN(i915.verbose_state_checks, format)) \
DRM_ERROR(format); \
} \
unlikely(__ret_warn_on); \
})
#define I915_STATE_WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) { \
if (i915.verbose_state_checks) \
WARN(1, "WARN_ON(" #condition ")\n"); \
else \
DRM_ERROR("WARN_ON(" #condition ")\n"); \
} \
unlikely(__ret_warn_on); \
})
#define I915_STATE_WARN_ON(x) \
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
}
static inline const char *onoff(bool v)
{
return v ? "on" : "off";
}
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
@@ -339,7 +334,7 @@ struct drm_i915_file_private {
unsigned boosts;
} rps;
struct intel_engine_cs *bsd_ring;
unsigned int bsd_ring;
};
enum intel_dpll_id {
@@ -633,6 +628,7 @@ struct drm_i915_display_funcs {
struct dpll *best_clock);
int (*compute_pipe_wm)(struct intel_crtc *crtc,
struct drm_atomic_state *state);
void (*program_watermarks)(struct intel_crtc_state *cstate);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -657,9 +653,6 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
void (*update_primary_plane)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
@@ -726,6 +719,8 @@ struct intel_uncore {
i915_reg_t reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
int unclaimed_mmio_check;
};
/* Iterate over initialised fw domains */
@@ -889,6 +884,9 @@ struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int pin_count;
struct i915_vma *lrc_vma;
u64 lrc_desc;
uint32_t *lrc_reg_state;
} engine[I915_NUM_RINGS];
struct list_head link;
@@ -1301,7 +1299,7 @@ struct i915_gem_mm {
bool busy;
/* the indicator for dispatch video commands on two BSD rings */
int bsd_ring_dispatch_index;
unsigned int bsd_ring_dispatch_index;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
@@ -1487,7 +1485,7 @@ struct intel_vbt_data {
u8 seq_version;
u32 size;
u8 *data;
u8 *sequence[MIPI_SEQ_MAX];
const u8 *sequence[MIPI_SEQ_MAX];
} dsi;
int crt_ddc_pin;
@@ -1784,7 +1782,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
unsigned int cdclk_freq, max_cdclk_freq;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
@@ -1829,8 +1827,13 @@ struct drm_i915_private {
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
/* dpll and cdclk state is protected by connection_mutex */
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
struct i915_workarounds workarounds;
@@ -1945,6 +1948,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
struct intel_context *kernel_context;
bool edp_low_vswing;
/* perform PHY state sanity checks? */
@@ -2265,9 +2270,9 @@ struct drm_i915_gem_request {
};
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out);
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -2576,6 +2581,11 @@ struct drm_i915_cmd_table {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2665,44 +2675,7 @@ extern int i915_max_ioctl;
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
/* i915_params.c */
struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
bool enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
/* i915_dma.c */
/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2745,7 +2718,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

View File

@@ -1251,7 +1251,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
s64 before, now;
s64 before = 0; /* Only to silence a compiler warning. */
int ret;
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1271,14 +1271,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
return -ETIME;
timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
/*
* Record current time in case interrupted by signal, or wedged.
*/
before = ktime_get_raw_ns();
}
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req, state);
@@ -1343,11 +1346,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
finish_wait(&ring->irq_queue, &wait);
out:
now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(req);
if (timeout) {
s64 tres = *timeout - (now - before);
s64 tres = *timeout - (ktime_get_raw_ns() - before);
*timeout = tres < 0 ? 0 : tres;
@@ -2677,10 +2679,8 @@ void i915_gem_request_free(struct kref *req_ref)
i915_gem_request_remove_from_client(req);
if (ctx) {
if (i915.enable_execlists) {
if (ctx != req->ring->default_context)
intel_lr_context_unpin(req);
}
if (i915.enable_execlists && ctx != req->i915->kernel_context)
intel_lr_context_unpin(req);
i915_gem_context_unreference(ctx);
}
@@ -2688,9 +2688,10 @@ void i915_gem_request_free(struct kref *req_ref)
kmem_cache_free(req->i915->requests, req);
}
int i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_gem_request *req;
@@ -2753,6 +2754,31 @@ err:
return ret;
}
/**
* i915_gem_request_alloc - allocate a request structure
*
* @engine: engine that we wish to issue the request on.
* @ctx: context that the request will be associated with.
* This can be NULL if the request is not directly related to
* any specific user context, in which case this function will
* choose an appropriate context to use.
*
* Returns a pointer to the allocated request if successful,
* or an error code if not.
*/
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx)
{
struct drm_i915_gem_request *req;
int err;
if (ctx == NULL)
ctx = to_i915(engine->dev)->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
@@ -3170,9 +3196,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
return 0;
if (*to_req == NULL) {
ret = i915_gem_request_alloc(to, to->default_context, to_req);
if (ret)
return ret;
struct drm_i915_gem_request *req;
req = i915_gem_request_alloc(to, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
*to_req = req;
}
trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3372,9 +3402,9 @@ int i915_gpu_idle(struct drm_device *dev)
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = i915_switch_context(req);
if (ret) {
@@ -4328,10 +4358,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (ret)
goto unref;
BUILD_BUG_ON(I915_NUM_RINGS > 16);
args->busy = obj->active << 16;
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->id;
args->busy = 0;
if (obj->active) {
int i;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
if (req)
args->busy |= 1 << (16 + req->ring->exec_id);
}
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->exec_id;
}
unref:
drm_gem_object_unreference(&obj->base);
@@ -4832,7 +4872,7 @@ i915_gem_init_hw(struct drm_device *dev)
*/
init_unused_rings(dev);
BUG_ON(!dev_priv->ring[RCS].default_context);
BUG_ON(!dev_priv->kernel_context);
ret = i915_ppgtt_init_hw(dev);
if (ret) {
@@ -4869,10 +4909,9 @@ i915_gem_init_hw(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req;
WARN_ON(!ring->default_context);
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret) {
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
@@ -5112,6 +5151,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_ring = -1;
ret = i915_gem_context_open(dev, file);
if (ret)
kfree(file_priv);
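
The i915_gem_busy_ioctl() hunk above is the uABI-sealing change from the shortlog: the high 16 bits of args->busy become a bitmask of engines still reading the object, indexed by the stable exec_id rather than the internal ring id, and the low 16 bits identify the last writer. A sketch of how userspace might decode the result; the sample value is invented:

#include <stdio.h>

int main(void)
{
	/* Hypothetical value returned in drm_i915_gem_busy.busy:
	 * being read by, and last written on, the engine with exec_id 2. */
	unsigned int busy = (1u << (16 + 2)) | 2;
	unsigned int write_id = busy & 0xffff;
	unsigned int read_mask = busy >> 16;
	unsigned int i;

	if (!busy) {
		printf("idle\n");
		return 0;
	}
	printf("last write on engine exec_id %u\n", write_id);
	/* 16 bits suffice: BUILD_BUG_ON(I915_NUM_RINGS > 16) guards this split. */
	for (i = 0; i < 16; i++)
		if (read_mask & (1u << i))
			printf("still read by engine exec_id %u\n", i);
	return 0;
}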

View File

@@ -347,22 +347,20 @@ void i915_gem_context_reset(struct drm_device *dev)
i915_gem_context_unreference(lctx);
ring->last_context = NULL;
}
/* Force the GPU state to be reinitialised on enabling */
if (ring->default_context)
ring->default_context->legacy_hw_ctx.initialized = false;
}
/* Force the GPU state to be reinitialised on enabling */
dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
}
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int i;
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->ring[RCS].default_context))
if (WARN_ON(dev_priv->kernel_context))
return 0;
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +390,7 @@ int i915_gem_context_init(struct drm_device *dev)
return PTR_ERR(ctx);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
}
dev_priv->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
@@ -408,7 +401,7 @@ int i915_gem_context_init(struct drm_device *dev)
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
struct intel_context *dctx = dev_priv->kernel_context;
int i;
if (dctx->legacy_hw_ctx.rcs_state) {
@@ -435,17 +428,17 @@ void i915_gem_context_fini(struct drm_device *dev)
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
if (ring->last_context)
if (ring->last_context) {
i915_gem_context_unreference(ring->last_context);
ring->default_context = NULL;
ring->last_context = NULL;
ring->last_context = NULL;
}
}
i915_gem_context_unreference(dctx);
dev_priv->kernel_context = NULL;
}
int i915_gem_context_enable(struct drm_i915_gem_request *req)

View File

@@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
return eb->lut[handle];
} else {
struct hlist_head *head;
struct hlist_node *node;
struct i915_vma *vma;
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct i915_vma *vma;
vma = hlist_entry(node, struct i915_vma, exec_node);
hlist_for_each_entry(vma, head, exec_node) {
if (vma->exec_handle == handle)
return vma;
}
@@ -1309,6 +1306,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
exec_start = params->batch_obj_vm_offset +
params->args_batch_start_offset;
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
ret = ring->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
@@ -1325,33 +1325,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
/**
* Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned.
* The ring index is returned.
*/
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
struct drm_file *file)
static unsigned int
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
/* Check whether the file_priv is using one ring */
if (file_priv->bsd_ring)
return file_priv->bsd_ring->id;
else {
/* If no, use the ping-pong mechanism to select one ring */
int ring_id;
mutex_lock(&dev->struct_mutex);
if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
ring_id = VCS;
dev_priv->mm.bsd_ring_dispatch_index = 1;
} else {
ring_id = VCS2;
dev_priv->mm.bsd_ring_dispatch_index = 0;
}
file_priv->bsd_ring = &dev_priv->ring[ring_id];
mutex_unlock(&dev->struct_mutex);
return ring_id;
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_ring < 0) {
/* If not, use the ping-pong mechanism to select one. */
mutex_lock(&dev_priv->dev->struct_mutex);
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
mutex_unlock(&dev_priv->dev->struct_mutex);
}
return file_priv->bsd_ring;
}
static struct drm_i915_gem_object *
@@ -1374,6 +1364,63 @@ eb_get_batch(struct eb_vmas *eb)
return vma->obj;
}
#define I915_USER_RINGS (4)
static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_DEFAULT] = RCS,
[I915_EXEC_RENDER] = RCS,
[I915_EXEC_BLT] = BCS,
[I915_EXEC_BSD] = VCS,
[I915_EXEC_VEBOX] = VECS
};
static int
eb_select_ring(struct drm_i915_private *dev_priv,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
struct intel_engine_cs **ring)
{
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
if (user_ring_id > I915_USER_RINGS) {
DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
return -EINVAL;
}
if ((user_ring_id != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}
if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
bsd_idx--;
} else {
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
bsd_idx);
return -EINVAL;
}
*ring = &dev_priv->ring[_VCS(bsd_idx)];
} else {
*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
}
if (!intel_ring_initialized(*ring)) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return -EINVAL;
}
return 0;
}
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
@@ -1381,6 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
@ -1411,51 +1459,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
return -EINVAL;
}
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
ring = &dev_priv->ring[RCS];
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
if (HAS_BSD2(dev)) {
int ring_id;
switch (args->flags & I915_EXEC_BSD_MASK) {
case I915_EXEC_BSD_DEFAULT:
ring_id = gen8_dispatch_bsd_ring(dev, file);
ring = &dev_priv->ring[ring_id];
break;
case I915_EXEC_BSD_RING1:
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BSD_RING2:
ring = &dev_priv->ring[VCS2];
break;
default:
DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
(int)(args->flags & I915_EXEC_BSD_MASK));
return -EINVAL;
}
} else
ring = &dev_priv->ring[VCS];
} else
ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
ret = eb_select_ring(dev_priv, file, args, &ring);
if (ret)
return ret;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1602,11 +1608,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
ret = i915_gem_request_alloc(ring, ctx, &params->request);
if (ret)
req = i915_gem_request_alloc(ring, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
}
ret = i915_gem_request_add_to_client(params->request, file);
ret = i915_gem_request_add_to_client(req, file);
if (ret)
goto err_batch_unpin;
@ -1622,6 +1630,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
params->request = req;
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
@@ -1645,8 +1654,8 @@ err:
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
if (ret && params->request)
i915_gem_request_cancel(params->request);
if (ret && req)
i915_gem_request_cancel(req);
mutex_unlock(&dev->struct_mutex);
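
eb_select_ring() above consumes the ring selector in the low bits of the execbuffer2 flags plus the two-bit BSD selector. A userspace-side sketch of composing those flags; the constants are assumed to mirror include/uapi/drm/i915_drm.h of this era and should be taken from the real header rather than copied from here:

#include <stdio.h>

#define I915_EXEC_RING_MASK	(7 << 0)
#define I915_EXEC_BSD		(2 << 0)
#define I915_EXEC_BSD_SHIFT	13
#define I915_EXEC_BSD_MASK	(3 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_DEFAULT	(0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	(2 << I915_EXEC_BSD_SHIFT)

int main(void)
{
	/* Pin the batch to the second BSD engine explicitly... */
	unsigned long long flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;
	/* ...or let gen8_dispatch_bsd_ring() ping-pong between VCS and VCS2. */
	unsigned long long flags_auto = I915_EXEC_BSD | I915_EXEC_BSD_DEFAULT;

	printf("explicit: ring %llu, bsd selector %llu\n",
	       flags & I915_EXEC_RING_MASK,
	       (flags & I915_EXEC_BSD_MASK) >> I915_EXEC_BSD_SHIFT);
	printf("default:  ring %llu, bsd selector %llu\n",
	       flags_auto & I915_EXEC_RING_MASK,
	       (flags_auto & I915_EXEC_BSD_MASK) >> I915_EXEC_BSD_SHIFT);
	return 0;
}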

View File

@@ -96,9 +96,11 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
const struct i915_ggtt_view i915_ggtt_view_normal;
const struct i915_ggtt_view i915_ggtt_view_normal = {
.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED
.type = I915_GGTT_VIEW_ROTATED,
};
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
@@ -3329,7 +3331,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
}
static struct scatterlist *
rotate_pages(dma_addr_t *in, unsigned int offset,
rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int width, unsigned int height,
struct sg_table *st, struct scatterlist *sg)
{

View File

@@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)

View File

@@ -47,6 +47,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
int count = 0;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (drm_mm_node_allocated(&vma->node))
count++;
if (vma->pin_count)
count++;
}
return count;
}
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
}
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
/* Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
*
* If the pages are pinned for any other reason than being bound
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
if (obj->pages_pin_count != num_vma_bound(obj))
return false;
/* We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
return swap_available() || obj->madv == I915_MADV_DONTNEED;
}
/**
* i915_gem_shrink - Shrink buffer object caches
* @dev_priv: i915 device
@@ -129,6 +169,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
continue;
if (!can_release_pages(obj))
continue;
drm_gem_object_reference(&obj->base);
/* For the unbound phase, this should be a no-op! */
@@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
return true;
}
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
int count = 0;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (drm_mm_node_allocated(&vma->node))
count++;
if (vma->pin_count)
count++;
}
return count;
}
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
@@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
if (!obj->active && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
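
can_release_pages() above encodes two conditions: every pin on the backing pages must come from a GPU binding that unbinding can drop, and the contents must be purgeable or swappable. A toy model of that accounting; the field names only loosely follow the kernel structs:

#include <stdbool.h>
#include <stdio.h>

struct toy_obj {
	int pages_pin_count;	/* total pins on the backing pages */
	int vma_bound_pins;	/* pins attributable to GPU bindings (num_vma_bound) */
	bool purgeable;		/* obj->madv == I915_MADV_DONTNEED */
};

static bool toy_can_release_pages(const struct toy_obj *obj, bool swap_available)
{
	/* A pin we cannot drop by unbinding means no forward progress. */
	if (obj->pages_pin_count != obj->vma_bound_pins)
		return false;
	return swap_available || obj->purgeable;
}

int main(void)
{
	struct toy_obj pinned_elsewhere = { .pages_pin_count = 2, .vma_bound_pins = 1 };
	struct toy_obj purgeable = { .pages_pin_count = 1, .vma_bound_pins = 1,
				     .purgeable = true };

	printf("%d\n", toy_can_release_pages(&pinned_elsewhere, true));	/* 0 */
	printf("%d\n", toy_can_release_pages(&purgeable, false));	/* 1 */
	return 0;
}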

View File

@@ -569,6 +569,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj->pages == NULL)
goto cleanup;
obj->get_page.sg = obj->pages->sgl;
obj->get_page.last = 0;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;

View File

@@ -1050,7 +1050,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
else
rbuf = ring->default_context->engine[ring->id].ringbuf;
rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
} else
rbuf = ring->buffer;

View File

@@ -40,6 +40,7 @@
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define UOS_RSA_SCRATCH_MAX_COUNT 64

View File

@@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev_priv->dev) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
(IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
(IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
if (!intel_enable_rc6(dev) ||
NEEDS_WaRsDisableCoarsePowerGating(dev))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
@@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
}
/* Finally, update the cached copy of the GuC's WQ head */
gc->wq_head = desc->head;
kunmap_atomic(base);
return ret;
}
@@ -471,28 +472,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
sizeof(desc) * client->ctx_index);
}
/* Get valid workqueue item and return it back to offset */
static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
if (!gc)
return 0;
/* Quickly return if wq space is available since the last time we
* cached the head position. */
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
return 0;
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
*offset = gc->wq_tail;
gc->wq_head = desc->head;
/* advance the tail for next workqueue item */
gc->wq_tail += size;
gc->wq_tail &= gc->wq_size - 1;
/* this will break the loop */
timeout_counter = 0;
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
ret = 0;
break;
}
if (timeout_counter)
@@ -510,12 +513,16 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
enum intel_ring_id ring_id = rq->ring->id;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off = 0;
int ret;
u32 tail, wq_len, wq_off, space;
ret = guc_get_workqueue_space(gc, &wq_off);
if (ret)
return ret;
space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
if (WARN_ON(space < sizeof(struct guc_wq_item)))
return -ENOSPC; /* shouldn't happen */
/* postincrement WQ tail for next time */
wq_off = gc->wq_tail;
gc->wq_tail += sizeof(struct guc_wq_item);
gc->wq_tail &= gc->wq_size - 1;
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
@@ -832,6 +839,96 @@ static void guc_create_log(struct intel_guc *guc)
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
static void init_guc_policies(struct guc_policies *policies)
{
struct guc_policy *policy;
u32 p, i;
policies->dpc_promote_time = 500000;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
for (i = 0; i < I915_NUM_RINGS; i++) {
policy = &policies->policy[p][i];
policy->execution_quantum = 1000000;
policy->preemption_time = 500000;
policy->fault_time = 250000;
policy->policy_flags = 0;
}
}
policies->is_valid = 1;
}
static void guc_create_ads(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *ring;
struct page *page;
u32 size, i;
/* The ads obj includes the struct itself and buffers passed to GuC */
size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
sizeof(struct guc_mmio_reg_state) +
GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
obj = guc->ads_obj;
if (!obj) {
obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
if (!obj)
return;
guc->ads_obj = obj;
}
page = i915_gem_object_get_page(obj, 0);
ads = kmap(page);
/*
* The GuC requires a "Golden Context" when it reinitialises
* engines after a reset. Here we use the Render ring default
* context, which must already exist and be pinned in the GGTT,
* so its address won't change after we've told the GuC where
* to find it.
*/
ring = &dev_priv->ring[RCS];
ads->golden_context_lrca = ring->status_page.gfx_addr;
for_each_ring(ring, dev_priv, i)
ads->eng_state_size[i] = intel_lr_context_size(ring);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
init_guc_policies(policies);
ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
sizeof(struct guc_ads);
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
for (i = 0; i < I915_NUM_RINGS; i++) {
reg_state->mmio_white_list[i].mmio_start =
dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
reg_state->mmio_white_list[i].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
sizeof(struct guc_policies);
ads->reg_state_buffer = ads->reg_state_addr +
sizeof(struct guc_mmio_reg_state);
kunmap(page);
}
/*
* Set up the memory resources to be shared with the GuC. At this point,
* we require just one object that can be mapped through the GGTT.
@@ -858,6 +955,8 @@ int i915_guc_submission_init(struct drm_device *dev)
guc_create_log(guc);
guc_create_ads(guc);
return 0;
}
@@ -865,7 +964,7 @@ int i915_guc_submission_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct intel_context *ctx = dev_priv->ring[RCS].default_context;
struct intel_context *ctx = dev_priv->kernel_context;
struct i915_guc_client *client;
/* client for execbuf submission */
@@ -896,6 +995,9 @@ void i915_guc_submission_fini(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
gem_release_guc_obj(dev_priv->guc.ads_obj);
guc->ads_obj = NULL;
gem_release_guc_obj(dev_priv->guc.log_obj);
guc->log_obj = NULL;
@@ -919,7 +1021,7 @@ int intel_guc_suspend(struct drm_device *dev)
if (!i915.enable_guc_submission)
return 0;
ctx = dev_priv->ring[RCS].default_context;
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
@@ -945,7 +1047,7 @@ int intel_guc_resume(struct drm_device *dev)
if (!i915.enable_guc_submission)
return 0;
ctx = dev_priv->ring[RCS].default_context;
ctx = dev_priv->kernel_context;
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
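
The work-queue hunks above replace the poll-the-shared-page loop with a cached head (gc->wq_head) refreshed from the doorbell path, so i915_guc_wq_check_space() usually avoids reading the GuC page at all. A standalone sketch of the circular-buffer test; the circ_buf macros are copied from include/linux/circ_buf.h and the sizes are illustrative:

#include <stdio.h>

/* Local copies of the kernel's circ_buf helpers so the sketch stands alone. */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size)	CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int wq_size = 4096;	/* power of two, as required */
	unsigned int wq_tail = 4080;	/* driver's producer index */
	unsigned int wq_head = 16;	/* cached GuC consumer index */
	unsigned int item_size = 16;	/* a 4-DW guc_wq_item */

	if (CIRC_SPACE(wq_tail, wq_head, wq_size) >= item_size)
		printf("room for another work item, no shared-page read needed\n");
	else
		printf("looks full: re-read desc->head and retry\n");
	return 0;
}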

View File

@@ -2188,10 +2188,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -2268,43 +2264,20 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
static irqreturn_t gen8_irq_handler(int irq, void *arg)
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl;
struct drm_device *dev = dev_priv->dev;
irqreturn_t ret = IRQ_NONE;
uint32_t tmp = 0;
u32 iir;
enum pipe pipe;
u32 aux_mask = GEN8_AUX_CHANNEL_A;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
if (INTEL_INFO(dev_priv)->gen >= 9)
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
goto out;
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
/* Find, clear, then process each source of interrupt */
ret = gen8_gt_irq_handler(dev_priv, master_ctl);
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
iir = I915_READ(GEN8_DE_MISC_IIR);
if (iir) {
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
if (tmp & GEN8_DE_MISC_GSE)
if (iir & GEN8_DE_MISC_GSE)
intel_opregion_asle_intr(dev);
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
@@ -2314,33 +2287,40 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
if (tmp) {
iir = I915_READ(GEN8_DE_PORT_IIR);
if (iir) {
u32 tmp_mask;
bool found = false;
u32 hotplug_trigger = 0;
if (IS_BROXTON(dev_priv))
hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
if (tmp & aux_mask) {
tmp_mask = GEN8_AUX_CHANNEL_A;
if (INTEL_INFO(dev_priv)->gen >= 9)
tmp_mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
if (iir & tmp_mask) {
dp_aux_irq_handler(dev);
found = true;
}
if (hotplug_trigger) {
if (IS_BROXTON(dev))
bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
else
ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
found = true;
if (IS_BROXTON(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
found = true;
}
} else if (IS_BROADWELL(dev_priv)) {
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
if (tmp_mask) {
ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
found = true;
}
}
if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev);
found = true;
}
@@ -2353,49 +2333,51 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
for_each_pipe(dev_priv, pipe) {
uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
u32 flip_done, fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (pipe_iir) {
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
if (pipe_iir & GEN8_PIPE_VBLANK &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
if (INTEL_INFO(dev_priv)->gen >= 9)
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
else
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
if (flip_done) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
}
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
pipe);
if (INTEL_INFO(dev_priv)->gen >= 9)
fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors)
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
pipe_name(pipe),
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
} else
iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
continue;
}
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK &&
intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
flip_done = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
else
flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
if (flip_done) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
}
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
fault_errors = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors)
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
pipe_name(pipe),
fault_errors);
}
if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
@@ -2405,15 +2387,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
u32 pch_iir = I915_READ(SDEIIR);
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
iir = I915_READ(SDEIIR);
if (iir) {
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
if (HAS_PCH_SPT(dev_priv))
spt_irq_handler(dev, pch_iir);
spt_irq_handler(dev, iir);
else
cpt_irq_handler(dev, pch_iir);
cpt_irq_handler(dev, iir);
} else {
/*
* Like on previous PCH there seems to be something
@@ -2423,10 +2405,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
}
return ret;
}
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl;
irqreturn_t ret;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
return IRQ_NONE;
I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
/* Find, clear, then process each source of interrupt */
ret = gen8_gt_irq_handler(dev_priv, master_ctl);
ret |= gen8_de_irq_handler(dev_priv, master_ctl);
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ_FW(GEN8_MASTER_IRQ);
out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
@@ -2949,14 +2957,44 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
ring->hangcheck.deadlock = 0;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
static bool subunits_stuck(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
u32 instdone[I915_NUM_INSTDONE_REG];
bool stuck;
int i;
if (ring->id != RCS)
return true;
i915_get_extra_instdone(ring->dev, instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
* accumulating the undone -> done transitions and only
* consider those as progress.
*/
stuck = true;
for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
if (tmp != ring->hangcheck.instdone[i])
stuck = false;
ring->hangcheck.instdone[i] |= tmp;
}
return stuck;
}
static enum intel_ring_hangcheck_action
head_stuck(struct intel_engine_cs *ring, u64 acthd)
{
if (acthd != ring->hangcheck.acthd) {
/* Clear subunit states on head movement */
memset(ring->hangcheck.instdone, 0,
sizeof(ring->hangcheck.instdone));
if (acthd > ring->hangcheck.max_acthd) {
ring->hangcheck.max_acthd = acthd;
return HANGCHECK_ACTIVE;
@@ -2965,6 +3003,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_ACTIVE_LOOP;
}
if (!subunits_stuck(ring))
return HANGCHECK_ACTIVE;
return HANGCHECK_HUNG;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_ring_hangcheck_action ha;
u32 tmp;
ha = head_stuck(ring, acthd);
if (ha != HANGCHECK_HUNG)
return ha;
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
@@ -3032,6 +3088,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_ring(ring, dev_priv, i) {
u64 acthd;
u32 seqno;
@@ -3106,7 +3168,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
/* Clear head and subunit states on seqno movement */
ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
memset(ring->hangcheck.instdone, 0,
sizeof(ring->hangcheck.instdone));
}
ring->hangcheck.seqno = seqno;
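
subunits_stuck() above counts a subunit as making progress only when OR-ing a fresh INSTDONE sample into the accumulator still flips a bit, and the accumulators are wiped whenever the head or seqno moves. A standalone model of that accumulation with made-up samples; I915_NUM_INSTDONE_REG is assumed to be 4 as in i915_drv.h:

#include <stdbool.h>
#include <stdio.h>

#define I915_NUM_INSTDONE_REG 4	/* assumed to match i915_drv.h */

static unsigned int accu[I915_NUM_INSTDONE_REG];

static bool toy_subunits_stuck(const unsigned int *instdone)
{
	bool stuck = true;
	int i;

	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
		const unsigned int tmp = instdone[i] | accu[i];

		if (tmp != accu[i])
			stuck = false;	/* an undone -> done transition: progress */
		accu[i] = tmp;
	}
	return stuck;
}

int main(void)
{
	const unsigned int s1[] = { 0x1, 0x0, 0x0, 0x0 };
	const unsigned int s2[] = { 0x1, 0x2, 0x0, 0x0 };

	printf("%d\n", toy_subunits_stuck(s1));	/* 0: new bit seen, still active */
	printf("%d\n", toy_subunits_stuck(s1));	/* 1: no transition, looks stuck */
	printf("%d\n", toy_subunits_stuck(s2));	/* 0: another subunit progressed */
	return 0;
}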

View File

@@ -22,6 +22,7 @@
* IN THE SOFTWARE.
*/
#include "i915_params.h"
#include "i915_drv.h"
struct i915_params i915 __read_mostly = {

View File

@@ -0,0 +1,68 @@
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _I915_PARAMS_H_
#define _I915_PARAMS_H_
#include <linux/cache.h> /* for __read_mostly */
struct i915_params {
int modeset;
int panel_ignore_lid;
int semaphores;
int lvds_channel_mode;
int panel_use_ssc;
int vbt_sdvo_panel_type;
int enable_rc6;
int enable_dc;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
int edp_vswing;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
bool load_detect_test;
bool reset;
bool disable_display;
bool disable_vtd_wa;
bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
};
extern struct i915_params i915 __read_mostly;
#endif

View File

@@ -1711,6 +1711,11 @@ enum skl_disp_power_wells {
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
#define CLAIM_ER_CLR (1 << 31)
#define CLAIM_ER_OVERFLOW (1 << 16)
#define CLAIM_ER_CTR_MASK 0xffff
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
#define DERRMR_PIPEA_SCANLINE (1<<0)

View File

@@ -164,7 +164,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -200,7 +200,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -521,7 +521,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
struct device *kdev = kobj_to_dev(kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct i915_error_state_file_priv error_priv;
@@ -556,7 +556,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
struct device *kdev = kobj_to_dev(kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
int ret;

View File

@@ -308,5 +308,5 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = false;
state->dpll_set = state->modeset = false;
}

View File

@@ -152,9 +152,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
crtc_state->base.active ? crtc_state->pipe_src_w : 0;
crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
crtc_state->base.active ? crtc_state->pipe_src_h : 0;
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -194,8 +194,16 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(old_state->state, crtc);
intel_plane->commit_plane(plane, intel_state);
if (intel_state->visible)
intel_plane->update_plane(plane,
to_intel_crtc_state(crtc_state),
intel_state);
else
intel_plane->disable_plane(plane, crtc);
}
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {

View File

@@ -31,11 +31,49 @@
#include "i915_drv.h"
#include "intel_bios.h"
/**
* DOC: Video BIOS Table (VBT)
*
* The Video BIOS Table, or VBT, provides platform and board specific
* configuration information to the driver that is not discoverable or available
* through other means. The configuration is mostly related to display
* hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
* the PCI ROM.
*
* The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
* Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
* contain the actual configuration information. The VBT Header, and thus the
* VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
* BDB Header. The data blocks are concatenated after the BDB Header. The data
* blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
* data. (Block 53, the MIPI Sequence Block is an exception.)
*
* The driver parses the VBT during load. The relevant information is stored in
* driver private data for ease of use, and the actual VBT is not read after
* that.
*/
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
static int panel_type;
/* Get BDB block size given a pointer to Block ID. */
static u32 _get_blocksize(const u8 *block_base)
{
/* The MIPI Sequence Block v3+ has a separate size field. */
if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
return *((const u32 *)(block_base + 4));
else
return *((const u16 *)(block_base + 1));
}
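/*
 * Illustrative layout, relative to a pointer at the Block ID (byte values
 * made up, not taken from any real VBT):
 *
 *   byte 0      Block ID (0x35 would be BDB_MIPI_SEQUENCE)
 *   bytes 1-2   u16 Block Size, unreliable for the MIPI Sequence Block v3+
 *   byte 3      first payload byte; for the sequence block, its version
 *   bytes 4-7   u32 size field, present only in the MIPI Sequence Block v3+
 *
 * Hence the version peek at block_base + 3 and the u32 read at
 * block_base + 4 in _get_blocksize() above.
 */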
/* Get BDB block size given a pointer to data after Block ID and Block Size. */
static u32 get_blocksize(const void *block_data)
{
return _get_blocksize(block_data - 3);
}
static const void *
find_section(const void *_bdb, int section_id)
{
@@ -52,14 +90,8 @@ find_section(const void *_bdb, int section_id)
/* walk the sections looking for section_id */
while (index + 3 < total) {
current_id = *(base + index);
index++;
current_size = *((const u16 *)(base + index));
index += 2;
/* The MIPI Sequence Block v3+ has a separate size field. */
if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
current_size = *((const u32 *)(base + index + 1));
current_size = _get_blocksize(base + index);
index += 3;
if (index + current_size > total)
return NULL;
@ -73,16 +105,6 @@ find_section(const void *_bdb, int section_id)
return NULL;
}
static u16
get_blocksize(const void *p)
{
u16 *block_ptr, block_size;
block_ptr = (u16 *)((char *)p - 2);
block_size = *block_ptr;
return block_size;
}
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
const struct lvds_dvo_timing *dvo_timing)
@ -675,84 +697,13 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
}
static u8 *goto_next_sequence(u8 *data, int *size)
{
u16 len;
int tmp = *size;
if (--tmp < 0)
return NULL;
/* goto first element */
data++;
while (1) {
switch (*data) {
case MIPI_SEQ_ELEM_SEND_PKT:
/*
* skip by this element payload size
* skip elem id, command flag and data type
*/
tmp -= 5;
if (tmp < 0)
return NULL;
data += 3;
len = *((u16 *)data);
tmp -= len;
if (tmp < 0)
return NULL;
/* skip by len */
data = data + 2 + len;
break;
case MIPI_SEQ_ELEM_DELAY:
/* skip by elem id, and delay is 4 bytes */
tmp -= 5;
if (tmp < 0)
return NULL;
data += 5;
break;
case MIPI_SEQ_ELEM_GPIO:
tmp -= 3;
if (tmp < 0)
return NULL;
data += 3;
break;
default:
DRM_ERROR("Unknown element\n");
return NULL;
}
/* end of sequence ? */
if (*data == 0)
break;
}
/* goto next sequence or end of block byte */
if (--tmp < 0)
return NULL;
data++;
/* update amount of data left for the sequence block to be parsed */
*size = tmp;
return data;
}
static void
parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_mipi_config *start;
const struct bdb_mipi_sequence *sequence;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
u8 *data;
const u8 *seq_data;
int i, panel_id, seq_size;
u16 block_size;
/* parse MIPI blocks only if LFP type is MIPI */
if (!dev_priv->vbt.has_mipi)
@ -798,8 +749,178 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
/* Find the sequence block and size for the given panel. */
static const u8 *
find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
u16 panel_id, u32 *seq_size)
{
u32 total = get_blocksize(sequence);
const u8 *data = &sequence->data[0];
u8 current_id;
u32 current_size;
int header_size = sequence->version >= 3 ? 5 : 3;
int index = 0;
int i;
/* skip new block size */
if (sequence->version >= 3)
data += 4;
for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
if (index + header_size > total) {
DRM_ERROR("Invalid sequence block (header)\n");
return NULL;
}
current_id = *(data + index);
if (sequence->version >= 3)
current_size = *((const u32 *)(data + index + 1));
else
current_size = *((const u16 *)(data + index + 1));
index += header_size;
if (index + current_size > total) {
DRM_ERROR("Invalid sequence block\n");
return NULL;
}
if (current_id == panel_id) {
*seq_size = current_size;
return data + index;
}
index += current_size;
}
DRM_ERROR("Sequence block detected but no valid configuration\n");
return NULL;
}
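Schematically, the table this function walks looks as follows; the field widths match the version check above, everything else is illustrative:

```c
/*
 * struct bdb_mipi_sequence payload, as walked above:
 *
 *   [v3+ only]  u32  New Block Size             (skipped up front)
 *   up to MAX_MIPI_CONFIGURATIONS entries of:
 *       u8         Panel ID
 *       u16 / u32  Sequence Size                (u32 from v3 on)
 *       u8[]       Sequence Data (Sequence Size bytes)
 *
 * The entry whose Panel ID matches the panel_type parsed earlier is
 * returned as a (data pointer, size) pair; any entry that would run
 * past the block size bails out with an error instead.
 */
```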
static int goto_next_sequence(const u8 *data, int index, int total)
{
u16 len;
/* Skip Sequence Byte. */
for (index = index + 1; index < total; index += len) {
u8 operation_byte = *(data + index);
index++;
switch (operation_byte) {
case MIPI_SEQ_ELEM_END:
return index;
case MIPI_SEQ_ELEM_SEND_PKT:
if (index + 4 > total)
return 0;
len = *((const u16 *)(data + index + 2)) + 4;
break;
case MIPI_SEQ_ELEM_DELAY:
len = 4;
break;
case MIPI_SEQ_ELEM_GPIO:
len = 2;
break;
case MIPI_SEQ_ELEM_I2C:
if (index + 7 > total)
return 0;
len = *(data + index + 6) + 7;
break;
default:
DRM_ERROR("Unknown operation byte\n");
return 0;
}
}
return 0;
}
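Because v1/v2 elements carry no explicit size, the skip arithmetic above hard-codes each element's layout. For example, for a send-packet element (payload length hypothetical):

```c
/*
 * MIPI_SEQ_ELEM_SEND_PKT, as consumed after the operation byte:
 *
 *   u8    command flags
 *   u8    data type
 *   u16   payload length = 6
 *   u8[6] payload
 *
 * which is why len = *((const u16 *)(data + index + 2)) + 4 above:
 * the flag byte, the type byte, the two length bytes, plus payload.
 * GPIO (2) and DELAY (4) are fixed-size; I2C reads its payload
 * length from byte 6 of the element, hence len = *(data + index + 6) + 7.
 */
```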
static int goto_next_sequence_v3(const u8 *data, int index, int total)
{
int seq_end;
u16 len;
u32 size_of_sequence;
/*
* Could skip sequence based on Size of Sequence alone, but also do some
* checking on the structure.
*/
if (total < 5) {
DRM_ERROR("Too small sequence size\n");
return 0;
}
/* Skip Sequence Byte. */
index++;
/*
* Size of Sequence. Excludes the Sequence Byte and the size itself,
* includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
* byte.
*/
size_of_sequence = *((const uint32_t *)(data + index));
index += 4;
seq_end = index + size_of_sequence;
if (seq_end > total) {
DRM_ERROR("Invalid sequence size\n");
return 0;
}
for (; index < total; index += len) {
u8 operation_byte = *(data + index);
index++;
if (operation_byte == MIPI_SEQ_ELEM_END) {
if (index != seq_end) {
DRM_ERROR("Invalid element structure\n");
return 0;
}
return index;
}
len = *(data + index);
index++;
/*
* FIXME: Would be nice to check elements like for v1/v2 in
* goto_next_sequence() above.
*/
switch (operation_byte) {
case MIPI_SEQ_ELEM_SEND_PKT:
case MIPI_SEQ_ELEM_DELAY:
case MIPI_SEQ_ELEM_GPIO:
case MIPI_SEQ_ELEM_I2C:
case MIPI_SEQ_ELEM_SPI:
case MIPI_SEQ_ELEM_PMIC:
break;
default:
DRM_ERROR("Unknown operation byte %u\n",
operation_byte);
break;
}
}
return 0;
}
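For contrast, a v3 sequence is self-describing. The envelope validated above is, per the surrounding comments (content hypothetical):

```c
/*
 * v3 sequence envelope:
 *
 *   u8   Sequence Byte (the sequence id)
 *   u32  Size of Sequence      (everything below, END included)
 *   repeated per element:
 *       u8   operation byte
 *       u8   Size of Operation
 *       u8[] payload (Size of Operation bytes)
 *   u8   MIPI_SEQ_ELEM_END
 *
 * so when MIPI_SEQ_ELEM_END is read, index must equal seq_end
 * exactly; any mismatch means the element sizes are corrupt.
 */
```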
static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
u8 *data;
int index = 0;
/* Only our generic panel driver uses the sequence block. */
if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
return;
/* Check if we have sequence block as well */
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
@ -807,95 +928,54 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
}
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 3) {
DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
if (sequence->version >= 4) {
DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
sequence->version);
return;
}
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
block_size = get_blocksize(sequence);
seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
if (!seq_data)
return;
/*
* parse the sequence block for individual sequences
*/
dev_priv->vbt.dsi.seq_version = sequence->version;
data = kmemdup(seq_data, seq_size, GFP_KERNEL);
if (!data)
return;
seq_data = &sequence->data[0];
/*
* sequence block is variable length and hence we need to parse and
* get the sequence data for specific panel id
*/
for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
panel_id = *seq_data;
seq_size = *((u16 *) (seq_data + 1));
if (panel_id == panel_type)
/* Parse the sequences, store pointers to each sequence. */
for (;;) {
u8 seq_id = *(data + index);
if (seq_id == MIPI_SEQ_END)
break;
/* skip the sequence including seq header of 3 bytes */
seq_data = seq_data + 3 + seq_size;
if ((seq_data - &sequence->data[0]) > block_size) {
DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
return;
if (seq_id >= MIPI_SEQ_MAX) {
DRM_ERROR("Unknown sequence %u\n", seq_id);
goto err;
}
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
if (sequence->version >= 3)
index = goto_next_sequence_v3(data, index, seq_size);
else
index = goto_next_sequence(data, index, seq_size);
if (!index) {
DRM_ERROR("Invalid sequence %u\n", seq_id);
goto err;
}
}
if (i == MAX_MIPI_CONFIGURATIONS) {
DRM_ERROR("Sequence block detected but no valid configuration\n");
return;
}
/* check if found sequence is completely within the sequence block
* just being paranoid */
if (seq_size > block_size) {
DRM_ERROR("Corrupted sequence/size, bailing out\n");
return;
}
/* skip the panel id(1 byte) and seq size(2 bytes) */
dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
if (!dev_priv->vbt.dsi.data)
return;
/*
* loop over the sequence data and split it into multiple sequences
* There are only 5 types of sequences as of now
*/
data = dev_priv->vbt.dsi.data;
dev_priv->vbt.dsi.data = data;
dev_priv->vbt.dsi.size = seq_size;
dev_priv->vbt.dsi.seq_version = sequence->version;
/* two consecutive 0x00 indicate end of all sequences */
while (1) {
int seq_id = *data;
if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
dev_priv->vbt.dsi.sequence[seq_id] = data;
DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
} else {
DRM_ERROR("undefined sequence\n");
goto err;
}
/* partial parsing to skip elements */
data = goto_next_sequence(data, &seq_size);
if (data == NULL) {
DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
goto err;
}
if (*data == 0)
break; /* end of sequence reached */
}
DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
err:
kfree(dev_priv->vbt.dsi.data);
dev_priv->vbt.dsi.data = NULL;
/* error during parsing so set all pointers to null
* because of partial parsing */
err:
kfree(data);
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
@ -1088,7 +1168,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
if (bdb->version < 195) {
if (bdb->version < 106) {
expected_size = 22;
} else if (bdb->version < 109) {
expected_size = 27;
} else if (bdb->version < 195) {
BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
expected_size = sizeof(struct old_child_dev_config);
} else if (bdb->version == 195) {
expected_size = 37;
@ -1101,18 +1186,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
bdb->version, expected_size);
}
/* The legacy sized child device config is the minimum we need. */
if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
DRM_ERROR("Child device config size %u is too small.\n",
p_defs->child_dev_size);
return;
}
/* Flag an error for unexpected size, but continue anyway. */
if (p_defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
p_defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
DRM_DEBUG_KMS("Child device config size %u is too small.\n",
p_defs->child_dev_size);
return;
}
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
/* get the number of child device */
@ -1285,7 +1370,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
/**
* intel_bios_init - find VBT and initialize settings from the BIOS
* @dev: DRM device
* @dev_priv: i915 device instance
*
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
* to appropriate values.
@ -1337,7 +1422,8 @@ intel_bios_init(struct drm_i915_private *dev_priv)
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
if (bios)


@ -25,25 +25,43 @@
*
*/
#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
/**
* struct vbt_header - VBT Header structure
* @signature: VBT signature, always starts with "$VBT"
* @version: Version of this structure
* @header_size: Size of this structure
* @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
* @vbt_checksum: Checksum
* @reserved0: Reserved
* @bdb_offset: Offset of &struct bdb_header from beginning of VBT
* @aim_offset: Offsets of add-in data blocks from beginning of VBT
*/
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 vbt_size; /**< in bytes */
u8 signature[20];
u16 version;
u16 header_size;
u16 vbt_size;
u8 vbt_checksum;
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
u32 bdb_offset;
u32 aim_offset[4];
} __packed;
/**
* struct bdb_header - BDB Header structure
* @signature: BDB signature "BIOS_DATA_BLOCK"
* @version: Version of the data block definitions
* @header_size: Size of this structure
* @bdb_size: Size of BDB (BDB Header and data blocks)
*/
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
u8 signature[16];
u16 version;
u16 header_size;
u16 bdb_size;
} __packed;
/* strictly speaking, this is a "skip" block, but it has interesting info */
@ -936,21 +954,29 @@ struct bdb_mipi_sequence {
/* MIPI Sequence Block definitions */
enum mipi_seq {
MIPI_SEQ_UNDEFINED = 0,
MIPI_SEQ_END = 0,
MIPI_SEQ_ASSERT_RESET,
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
MIPI_SEQ_DEASSERT_RESET,
MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
MIPI_SEQ_POWER_ON, /* sequence block v3+ */
MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
MIPI_SEQ_MAX
};
enum mipi_seq_element {
MIPI_SEQ_ELEM_UNDEFINED = 0,
MIPI_SEQ_ELEM_END = 0,
MIPI_SEQ_ELEM_SEND_PKT,
MIPI_SEQ_ELEM_DELAY,
MIPI_SEQ_ELEM_GPIO,
MIPI_SEQ_ELEM_STATUS,
MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
MIPI_SEQ_ELEM_MAX
};
@ -965,4 +991,4 @@ enum mipi_gpio_pin_index {
MIPI_GPIO_MAX
};
#endif /* _I830_BIOS_H_ */
#endif /* _INTEL_BIOS_H_ */
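A consumer has to gate on the stored seq_version before trusting the newer ids. A minimal sketch of such a check, based on the version annotations above (function name assumed, not a driver API):

```c
static bool sketch_seq_available(u8 seq_version, enum mipi_seq seq_id)
{
	switch (seq_id) {
	case MIPI_SEQ_BACKLIGHT_ON:
	case MIPI_SEQ_BACKLIGHT_OFF:
	case MIPI_SEQ_TEAR_ON:
		return seq_version >= 2;	/* sequence block v2+ */
	case MIPI_SEQ_TEAR_OFF:
	case MIPI_SEQ_POWER_ON:
	case MIPI_SEQ_POWER_OFF:
		return seq_version >= 3;	/* sequence block v3+ */
	default:
		return true;
	}
}
```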


@ -44,6 +44,8 @@
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
@ -278,10 +280,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
" [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
@ -399,7 +402,10 @@ out:
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
dev_notice(dev_priv->dev->dev,
"Failed to load DMC firmware"
" [" FIRMWARE_URL "],"
" disabling runtime power management.\n");
}
release_firmware(fw);
@ -421,7 +427,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
if (IS_SKYLAKE(dev_priv))
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;


@ -133,38 +133,38 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80009010, 0x000000C0, 0x1 },
{ 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x000000DF, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80005012, 0x000000C0, 0x1 },
};
/* Skylake U */
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80007011, 0x000000CD, 0x0 },
{ 0x80009010, 0x000000C0, 0x1 },
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80005012, 0x000000C0, 0x1 },
{ 0x80007011, 0x000000C0, 0x1 },
{ 0x00002016, 0x00000088, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x80005012, 0x000000C0, 0x1 },
};
/* Skylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80007011, 0x000000CD, 0x0 },
{ 0x80009010, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80005012, 0x000000C0, 0x3 },
{ 0x80007011, 0x000000C0, 0x3 },
{ 0x00000018, 0x00000088, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x80005012, 0x000000C0, 0x3 },
};
/*
@ -226,26 +226,26 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00000018, 0x00000098, 0x0 },
{ 0x00004013, 0x00000088, 0x0 },
{ 0x00006012, 0x00000087, 0x0 },
{ 0x80006012, 0x000000CD, 0x1 },
{ 0x00000018, 0x000000DF, 0x0 },
{ 0x00003015, 0x00000087, 0x0 }, /* Default */
{ 0x00003015, 0x000000C7, 0x0 },
{ 0x00000018, 0x000000C7, 0x0 },
{ 0x80003015, 0x000000CD, 0x1 }, /* Default */
{ 0x80003015, 0x000000C0, 0x1 },
{ 0x80000018, 0x000000C0, 0x1 },
};
/* Skylake Y */
static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
{ 0x00007011, 0x00000084, 0x0 },
{ 0x80007011, 0x000000CB, 0x3 },
{ 0x00000018, 0x000000A4, 0x0 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00004013, 0x00000080, 0x0 },
{ 0x00006013, 0x000000C7, 0x0 },
{ 0x80006013, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000008A, 0x0 },
{ 0x00003015, 0x000000C7, 0x0 }, /* Default */
{ 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
{ 0x00000018, 0x000000C7, 0x0 },
{ 0x80003015, 0x000000C0, 0x3 }, /* Default */
{ 0x80003015, 0x000000C0, 0x3 },
{ 0x80000018, 0x000000C0, 0x3 },
};
struct bxt_ddi_buf_trans {
@ -301,8 +301,8 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
enum port port, int type);
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type);
static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
struct intel_digital_port **dig_port,
@ -342,81 +342,50 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
return port;
}
static bool
intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
static const struct ddi_buf_trans *
skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
}
static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
int *n_entries)
{
const struct ddi_buf_trans *ddi_translations;
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_dp;
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
ddi_translations = skl_u_ddi_translations_dp;
return skl_y_ddi_translations_dp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
return skl_u_ddi_translations_dp;
} else {
ddi_translations = skl_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
return skl_ddi_translations_dp;
}
return ddi_translations;
}
static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
int *n_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_y_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
} else {
ddi_translations = skl_y_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
}
} else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_u_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
} else {
ddi_translations = skl_u_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
}
} else {
if (dev_priv->edp_low_vswing) {
ddi_translations = skl_ddi_translations_edp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
} else {
ddi_translations = skl_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
}
}
return ddi_translations;
}
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_device *dev,
int *n_entries)
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
const struct ddi_buf_trans *ddi_translations;
if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
ddi_translations = skl_y_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
} else {
ddi_translations = skl_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
if (dev_priv->edp_low_vswing) {
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
return skl_u_ddi_translations_edp;
} else {
*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
return skl_ddi_translations_edp;
}
}
return ddi_translations;
return skl_get_buf_trans_dp(dev_priv, n_entries);
}
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
return skl_ddi_translations_hdmi;
}
}
/*
@ -426,42 +395,52 @@ skl_get_buf_trans_hdmi(struct drm_device *dev,
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool supports_hdmi)
void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
int hdmi_level;
enum port port;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
if (IS_BROXTON(dev)) {
if (!supports_hdmi)
port = intel_ddi_get_encoder_port(encoder);
hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
if (IS_BROXTON(dev_priv)) {
if (encoder->type != INTEL_OUTPUT_HDMI)
return;
/* Vswing programming for HDMI */
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
INTEL_OUTPUT_HDMI);
return;
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
}
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
ddi_translations_dp =
skl_get_buf_trans_dp(dev, &n_dp_entries);
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev, &n_edp_entries);
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
ddi_translations_hdmi =
skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
iboost_bit = 1<<31;
} else if (IS_BROADWELL(dev)) {
if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
port != PORT_A && port != PORT_E &&
n_edp_entries > 9))
n_edp_entries = 9;
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
@ -470,7 +449,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
} else if (IS_HASWELL(dev)) {
} else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
@ -490,30 +469,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
hdmi_default_entry = 7;
}
switch (port) {
case PORT_A:
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
case PORT_B:
case PORT_C:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
case PORT_D:
if (intel_dp_is_edp(dev, PORT_D)) {
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
} else {
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
}
break;
case PORT_E:
if (ddi_translations_fdi)
ddi_translations = ddi_translations_fdi;
else
ddi_translations = ddi_translations_dp;
case INTEL_OUTPUT_ANALOG:
ddi_translations = ddi_translations_fdi;
size = n_dp_entries;
break;
default:
@ -527,7 +494,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
ddi_translations[i].trans2);
}
if (!supports_hdmi)
if (encoder->type != INTEL_OUTPUT_HDMI)
return;
/* Choose a good default if VBT is badly populated */
@ -542,37 +509,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
ddi_translations_hdmi[hdmi_level].trans2);
}
/* Program DDI buffer translations for DP. By default, program ports A-D in DP
* mode and port E for FDI.
*/
void intel_prepare_ddi(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
bool visited[I915_MAX_PORTS] = { 0, };
if (!HAS_DDI(dev))
return;
for_each_intel_encoder(dev, intel_encoder) {
struct intel_digital_port *intel_dig_port;
enum port port;
bool supports_hdmi;
if (intel_encoder->type == INTEL_OUTPUT_DSI)
continue;
ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
if (visited[port])
continue;
supports_hdmi = intel_dig_port &&
intel_dig_port_supports_hdmi(intel_dig_port);
intel_prepare_ddi_buffers(dev, port, supports_hdmi);
visited[port] = true;
}
}
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
@ -601,8 +537,14 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val;
for_each_encoder_on_crtc(dev, crtc, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
intel_prepare_ddi_buffer(encoder);
}
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
* mode set "sequence for CRT port" document:
* - TP1 to TP2 time with the default value
@ -2085,10 +2027,9 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
TRANS_CLK_SEL_DISABLED);
}
static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
enum port port, int type)
static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
@ -2103,21 +2044,26 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_EDP) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
if (WARN_ON(port != PORT_A &&
port != PORT_E && n_entries > 9))
n_entries = 9;
iboost = ddi_translations[level].i_boost;
}
} else if (type == INTEL_OUTPUT_HDMI) {
if (hdmi_iboost) {
iboost = hdmi_iboost;
} else {
ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
iboost = ddi_translations[level].i_boost;
}
} else {
@ -2142,10 +2088,9 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
}
static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
enum port port, int type)
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct bxt_ddi_buf_trans *ddi_translations;
u32 n_entries, i;
uint32_t val;
@ -2260,7 +2205,7 @@ static uint32_t translate_signal_level(int signal_levels)
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = dport->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
@ -2270,10 +2215,10 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
level = translate_signal_level(signal_levels);
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_set_iboost(dev, level, port, encoder->type);
else if (IS_BROXTON(dev))
bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
else if (IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
return DDI_BUF_TRANS_SELECT(level);
}
@ -2325,12 +2270,12 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
int hdmi_level;
intel_prepare_ddi_buffer(intel_encoder);
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@ -2348,17 +2293,11 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
if (IS_BROXTON(dev)) {
hdmi_level = dev_priv->vbt.
ddi_port_info[port].hdmi_level_shift;
bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI);
}
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
&crtc->config->base.adjusted_mode);
@ -3282,6 +3221,33 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp;
int max_lanes;
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
switch (port) {
case PORT_A:
max_lanes = 4;
break;
case PORT_E:
max_lanes = 0;
break;
default:
max_lanes = 4;
break;
}
} else {
switch (port) {
case PORT_A:
max_lanes = 2;
break;
case PORT_E:
max_lanes = 2;
break;
default:
max_lanes = 4;
break;
}
}
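Both switch statements above share their default arm, so the assignment collapses to a three-way rule. A condensed equivalent, on the assumption that DDI A and DDI E split a common pool of four lanes via the DDI_A_4_LANES strap:

```c
/* Equivalent condensed form of the lane assignment above. */
static int sketch_max_lanes(bool ddi_a_4_lanes, enum port port)
{
	if (port == PORT_A)
		return ddi_a_4_lanes ? 4 : 2;
	if (port == PORT_E)
		return ddi_a_4_lanes ? 0 : 2;
	return 4;	/* ports B/C/D always get four lanes */
}
```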
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
@ -3315,6 +3281,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
intel_dig_port->max_lanes = max_lanes;
/*
* Bspec says that DDI_A_4_LANES is the only supported configuration

File diff suppressed because it is too large.


@ -157,14 +157,9 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
u8 source_max, sink_max;
source_max = 4;
if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
(intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
source_max = 2;
source_max = intel_dig_port->max_lanes;
sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
return min(source_max, sink_max);
@ -340,8 +335,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
release_cl_override = IS_CHERRYVIEW(dev) &&
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll);
if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
DRM_ERROR("Failed to force on pll for pipe %c!\n",
pipe_name(pipe));
return;
}
}
/*
@ -2243,11 +2242,6 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
_intel_edp_backlight_off(intel_dp);
}
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
}
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@ -2257,7 +2251,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
I915_STATE_WARN(cur_state != state,
"DP port %c state assertion failure (expected %s, current %s)\n",
port_name(dig_port->port),
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@ -2267,7 +2261,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
@ -5839,6 +5833,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
int type, ret;
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return false;
intel_dp->pps_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
@ -6037,6 +6036,7 @@ intel_dp_init(struct drm_device *dev,
intel_dig_port->port = port;
dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
if (IS_CHERRYVIEW(dev)) {


@ -184,7 +184,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
intel_mst->port = found->port;
if (intel_dp->active_mst_links == 0) {
intel_ddi_clk_select(encoder, intel_crtc->config);
intel_prepare_ddi_buffer(&intel_dig_port->base);
intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
intel_dp_set_link_params(intel_dp, intel_crtc->config);


@ -246,7 +246,18 @@ struct intel_atomic_state {
struct drm_atomic_state base;
unsigned int cdclk;
bool dpll_set;
/*
* Calculated device cdclk, can be different from cdclk
* only when all crtc's are DPMS off.
*/
unsigned int dev_cdclk;
bool dpll_set, modeset;
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
};
@ -647,23 +658,17 @@ struct intel_plane {
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
* the intel_plane_state structure and accessed via drm_plane->state.
* the intel_plane_state structure and accessed via plane_state.
*/
void (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct drm_plane *plane,
struct drm_crtc *crtc);
int (*check_plane)(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
};
struct intel_watermark_params {
@ -817,6 +822,7 @@ struct intel_digital_port {
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
@ -996,7 +1002,7 @@ void intel_crt_init(struct drm_device *dev);
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
void intel_prepare_ddi(struct drm_device *dev);
void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
@ -1041,8 +1047,8 @@ unsigned int intel_fb_align_height(struct drm_device *dev,
uint64_t fb_format_modifier);
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
enum fb_op_origin origin);
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
uint32_t pixel_format);
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
@ -1126,9 +1132,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier, unsigned int plane);
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
static inline bool
intel_rotation_90_or_270(unsigned int rotation)
@ -1149,8 +1154,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state);
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
/* modesetting asserts */
@ -1167,11 +1172,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv,
int *x, int *y,
uint64_t fb_modifier,
unsigned int cpp,
unsigned int pitch);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);


@ -702,7 +702,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
u32 pclk = 0;
u32 pclk;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
@ -713,12 +713,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
*/
pipe_config->dpll_hw_state.dpll_md = 0;
if (IS_BROXTON(encoder->base.dev))
pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
else if (IS_VALLEYVIEW(encoder->base.dev) ||
IS_CHERRYVIEW(encoder->base.dev))
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
return;


@ -126,8 +126,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);


@ -229,14 +229,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
{
return data + *(data + 6) + 7;
}
typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
const u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
mipi_exec_delay,
mipi_exec_gpio,
NULL, /* status read; later */
[MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
[MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
[MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
[MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
};
/*
@ -246,107 +250,114 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
"UNDEFINED",
"MIPI_SEQ_ASSERT_RESET",
"MIPI_SEQ_INIT_OTP",
"MIPI_SEQ_DISPLAY_ON",
"MIPI_SEQ_DISPLAY_OFF",
"MIPI_SEQ_DEASSERT_RESET"
[MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
[MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF",
[MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON",
[MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF",
};
static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
static const char *sequence_name(enum mipi_seq seq_id)
{
fn_mipi_elem_exec mipi_elem_exec;
int index;
if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
return seq_name[seq_id];
else
return "(unknown)";
}
if (!data)
static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
return;
DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
data = dev_priv->vbt.dsi.sequence[seq_id];
if (!data) {
DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
seq_id, sequence_name(seq_id));
return;
}
/* go to the first element of the sequence */
WARN_ON(*data != seq_id);
DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
data++;
/* parse each byte till we reach end of sequence byte - 0x00 */
/* Skip Size of Sequence. */
if (dev_priv->vbt.dsi.seq_version >= 3)
data += 4;
while (1) {
index = *data;
mipi_elem_exec = exec_elem[index];
if (!mipi_elem_exec) {
DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
u8 operation_byte = *data++;
u8 operation_size = 0;
if (operation_byte == MIPI_SEQ_ELEM_END)
break;
if (operation_byte < ARRAY_SIZE(exec_elem))
mipi_elem_exec = exec_elem[operation_byte];
else
mipi_elem_exec = NULL;
/* Size of Operation. */
if (dev_priv->vbt.dsi.seq_version >= 3)
operation_size = *data++;
if (mipi_elem_exec) {
data = mipi_elem_exec(intel_dsi, data);
} else if (operation_size) {
/* We have size, skip. */
DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
DRM_ERROR("Unsupported MIPI operation byte %u\n",
operation_byte);
return;
}
/* goto element payload */
data++;
/* execute the element specific routines */
data = mipi_elem_exec(intel_dsi, data);
/*
* After processing the element, data should point to
* next element or end of sequence
* check if we have reached the end of the sequence
*/
if (*data == 0x00)
break;
}
}
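Walking a concrete, hypothetical v3 byte stream through the loop above shows why the per-operation size byte matters:

```c
/*
 *   02            sequence byte: MIPI_SEQ_INIT_OTP   (skipped)
 *   0f 00 00 00   Size of Sequence = 15              (skipped for v3)
 *   02 06 <6 B>   SEND_PKT, size 6   -> mipi_exec_send_packet()
 *   09 04 <4 B>   unknown op, size 4 -> skipped with a debug message
 *   00            MIPI_SEQ_ELEM_END  -> loop terminates
 *
 * On a v1/v2 stream there is no size byte, so the same unknown
 * operation byte aborts the whole sequence with DRM_ERROR instead.
 */
```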
static int vbt_panel_prepare(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
generic_exec_sequence(intel_dsi, sequence);
generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
return 0;
}
static int vbt_panel_unprepare(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
return 0;
}
static int vbt_panel_enable(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
generic_exec_sequence(intel_dsi, sequence);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
return 0;
}
static int vbt_panel_disable(struct drm_panel *panel)
{
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const u8 *sequence;
sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
generic_exec_sequence(intel_dsi, sequence);
generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
return 0;
}
@ -666,6 +677,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* This is cheating a bit with the cleanup. */
vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
if (!vbt_panel)
return NULL;
vbt_panel->intel_dsi = intel_dsi;
drm_panel_init(&vbt_panel->panel);


@ -30,14 +30,6 @@
#include "i915_drv.h"
#include "intel_dsi.h"
#define DSI_HSS_PACKET_SIZE 4
#define DSI_HSE_PACKET_SIZE 4
#define DSI_HSA_PACKET_EXTRA_SIZE 6
#define DSI_HBP_PACKET_EXTRA_SIZE 6
#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4
static int dsi_pixel_format_bpp(int pixel_format)
{
int bpp;
@ -71,77 +63,6 @@ static const u32 lfsr_converts[] = {
71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
};
#ifdef DSI_CLK_FROM_RR
static u32 dsi_rr_formula(const struct drm_display_mode *mode,
int pixel_format, int video_mode_format,
int lane_count, bool eotp)
{
u32 bpp;
u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
u32 bytes_per_line, bytes_per_frame;
u32 num_frames;
u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
u32 dsi_bit_clock_hz;
u32 dsi_clk;
bpp = dsi_pixel_format_bpp(pixel_format);
hactive = mode->hdisplay;
vactive = mode->vdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
/*
* XXX: Need to accurately calculate LP to HS transition timeout and add
* it to bytes_per_line/bytes_per_frame.
*/
if (eotp && video_mode_format == VIDEO_MODE_BURST)
bytes_per_line += DSI_EOTP_PACKET_SIZE;
bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
vactive * bytes_per_line + vfp * bytes_per_line;
if (eotp &&
(video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
bytes_per_frame += DSI_EOTP_PACKET_SIZE;
num_frames = drm_mode_vrefresh(mode);
bytes_per_x_frames = num_frames * bytes_per_frame;
bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
dsi_clk = dsi_bit_clock_hz / 1000;
if (eotp && video_mode_format == VIDEO_MODE_BURST)
dsi_clk *= 2;
return dsi_clk;
}
#else
/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
@ -155,8 +76,6 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
return dsi_clk_khz;
}
#endif
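The retained helper whose tail is visible above presumably reduces to spreading the pixel stream, at bpp bits per pixel, across the available lanes. A sketch under that assumption:

```c
/* Assumed shape of dsi_clk_from_pclk(); pclk and result in kHz. */
static u32 sketch_dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
	u32 bpp = dsi_pixel_format_bpp(pixel_format);

	return DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
}
```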
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct dsi_mnp *dsi_mnp, int target_dsi_clk)
{
@ -322,7 +241,7 @@ static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
bpp, pipe_bpp);
}
u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@ -384,7 +303,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
u32 pclk;
u32 dsi_clk;
@ -419,6 +338,14 @@ u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
if (IS_BROXTON(encoder->base.dev))
return bxt_dsi_get_pclk(encoder, pipe_bpp);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp);
}
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;


@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
@ -171,8 +171,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
out:
mutex_unlock(&dev->struct_mutex);
if (!IS_ERR_OR_NULL(fb))
drm_framebuffer_unreference(fb);
return ret;
}


@ -43,6 +43,7 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
uint32_t wq_head;
/* GuC submission statistics & status */
uint64_t submissions[I915_NUM_RINGS];
@ -88,6 +89,8 @@ struct intel_guc {
uint32_t log_flags;
struct drm_i915_gem_object *log_obj;
struct drm_i915_gem_object *ads_obj;
struct drm_i915_gem_object *ctx_pool_obj;
struct ida ctx_ids;
@ -122,5 +125,6 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq);
void i915_guc_submission_disable(struct drm_device *dev);
void i915_guc_submission_fini(struct drm_device *dev);
int i915_guc_wq_check_space(struct i915_guc_client *client);
#endif


@ -39,6 +39,7 @@
#define GUC_CTX_PRIORITY_HIGH 1
#define GUC_CTX_PRIORITY_KMD_NORMAL 2
#define GUC_CTX_PRIORITY_NORMAL 3
#define GUC_CTX_PRIORITY_NUM 4
#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
@ -81,11 +82,14 @@
#define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12
#define GUC_CTL_ARAT_HIGH 1
#define GUC_CTL_ARAT_LOW 2
#define GUC_CTL_DEVICE_INFO 3
#define GUC_CTL_GTTYPE_SHIFT 0
#define GUC_CTL_COREFAMILY_SHIFT 7
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
@ -97,9 +101,12 @@
#define GUC_LOG_ISR_PAGES 3
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_PAGE_FAULT_CONTROL 5
#define GUC_CTL_WA 6
#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
#define GUC_CTL_FEATURE 7
#define GUC_CTL_VCS2_ENABLED (1 << 0)
#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
@ -109,6 +116,7 @@
#define GUC_CTL_PREEMPTION_LOG (1 << 5)
#define GUC_CTL_ENABLE_SLPC (1 << 7)
#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
#define GUC_CTL_DEBUG 8
#define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
@ -118,9 +126,19 @@
/* Verbosity range-check limits, without the shift */
#define GUC_LOG_VERBOSITY_MIN 0
#define GUC_LOG_VERBOSITY_MAX 3
#define GUC_LOG_VERBOSITY_MASK 0x0000000f
#define GUC_LOG_DESTINATION_MASK (3 << 4)
#define GUC_LOG_DISABLED (1 << 6)
#define GUC_PROFILE_ENABLED (1 << 7)
#define GUC_WQ_TRACK_ENABLED (1 << 8)
#define GUC_ADS_ENABLED (1 << 9)
#define GUC_DEBUG_RESERVED (1 << 10)
#define GUC_ADS_ADDR_SHIFT 11
#define GUC_ADS_ADDR_MASK 0xfffff800
#define GUC_CTL_RSRVD 9
#define GUC_CTL_MAX_DWORDS (GUC_CTL_RSRVD + 1)
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
/**
* DOC: GuC Firmware Layout
@ -299,6 +317,99 @@ struct guc_context_desc {
#define GUC_POWER_D2 3
#define GUC_POWER_D3 4
/* Scheduling policy settings */
/* Reset engine upon preempt failure */
#define POLICY_RESET_ENGINE (1<<0)
/* Preempt to idle on quantum expiry */
#define POLICY_PREEMPT_TO_IDLE (1<<1)
#define POLICY_MAX_NUM_WI 15
struct guc_policy {
/* Time for one workload to execute. (in micro seconds) */
u32 execution_quantum;
u32 reserved1;
/* Time to wait for a preemption request to complete before issuing a
* reset. (in micro seconds). */
u32 preemption_time;
/* How much time to allow to run after the first fault is observed.
* Then preempt afterwards. (in micro seconds) */
u32 fault_time;
u32 policy_flags;
u32 reserved[2];
} __packed;
struct guc_policies {
struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS];
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
* Typically 1000s of micro seconds (example only, not granularity). */
u32 dpc_promote_time;
/* Must be set to take these new values. */
u32 is_valid;
/* Max number of WIs to process per call. A large value may keep CS
* idle. */
u32 max_num_work_items;
u32 reserved[19];
} __packed;
/* GuC MMIO reg state struct */
#define GUC_REGSET_FLAGS_NONE 0x0
#define GUC_REGSET_POWERCYCLE 0x1
#define GUC_REGSET_MASKED 0x2
#define GUC_REGSET_ENGINERESET 0x4
#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8
#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10
#define GUC_REGSET_MAX_REGISTERS 25
#define GUC_MMIO_WHITE_LIST_START 0x24d0
#define GUC_MMIO_WHITE_LIST_MAX 12
#define GUC_S3_SAVE_SPACE_PAGES 10
struct guc_mmio_regset {
struct __packed {
u32 offset;
u32 value;
u32 flags;
} registers[GUC_REGSET_MAX_REGISTERS];
u32 values_valid;
u32 number_of_registers;
} __packed;
struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
struct guc_mmio_regset engine_reg[I915_NUM_RINGS];
/* MMIO registers that are set as non privileged */
struct __packed {
u32 mmio_start;
u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
u32 count;
} mmio_white_list[I915_NUM_RINGS];
} __packed;
/* GuC Additional Data Struct */
struct guc_ads {
u32 reg_state_addr;
u32 reg_state_buffer;
u32 golden_context_lrca;
u32 scheduler_policies;
u32 reserved0[3];
u32 eng_state_size[I915_NUM_RINGS];
u32 reserved2[4];
} __packed;
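The i915_guc_submission.c hunk that allocates and fills this struct is suppressed above. A hedged sketch of the likely layout, with policies and register state packed into the same object right after the ADS; the offsets and helper shape are assumptions, not the commit's code:

```c
static void sketch_fill_guc_ads(struct guc_ads *ads, u32 ads_ggtt_offset)
{
	/* One GEM object: the ADS first, then the scheduler policies,
	 * then the MMIO register state and its save/restore buffer. */
	ads->scheduler_policies = ads_ggtt_offset + sizeof(struct guc_ads);
	ads->reg_state_addr = ads->scheduler_policies +
			      sizeof(struct guc_policies);
	ads->reg_state_buffer = ads->reg_state_addr +
				sizeof(struct guc_mmio_reg_state);
}
```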
/* This Action will be programmed in C180 - SOFT_SCRATCH_0_REG */
enum host2guc_action {
HOST2GUC_ACTION_DEFAULT = 0x0,


@ -165,6 +165,13 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
}
if (guc->ads_obj) {
u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
>> PAGE_SHIFT;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
}
/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
@ -438,6 +445,7 @@ fail:
direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);
return err;
}
@ -554,10 +562,12 @@ fail:
DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
guc_fw->guc_fw_path, err);
mutex_lock(&dev->struct_mutex);
obj = guc_fw->guc_fw_obj;
if (obj)
drm_gem_object_unreference(&obj->base);
guc_fw->guc_fw_obj = NULL;
mutex_unlock(&dev->struct_mutex);
release_firmware(fw); /* OK even if fw is NULL */
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
@ -624,10 +634,11 @@ void intel_guc_ucode_fini(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
mutex_lock(&dev->struct_mutex);
direct_interrupts_to_host(dev_priv);
i915_guc_submission_disable(dev);
i915_guc_submission_fini(dev);
mutex_lock(&dev->struct_mutex);
if (guc_fw->guc_fw_obj)
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
guc_fw->guc_fw_obj = NULL;

drivers/gpu/drm/i915/intel_hdmi.c (mode changed: normal file → executable file)

@ -2033,6 +2033,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
@ -2218,6 +2223,7 @@ void intel_hdmi_init(struct drm_device *dev,
dev_priv->dig_port_map[port] = intel_encoder;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}


@ -263,9 +263,76 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
return 0;
}
static void
logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
(ring->id == VCS || ring->id == VCS2);
ring->ctx_desc_template = GEN8_CTX_VALID;
ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(dev))
ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers */
/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
/* WaEnableForceRestoreInCtxtDescForVCS:skl */
/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
if (ring->disable_lite_restore_wa)
ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}
/**
* intel_lr_context_descriptor_update() - calculate & cache the descriptor
* for a pinned context
*
* @ctx: Context to work on
* @ring: Engine the descriptor will be used with
*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
* expensive to calculate, we'll just do it once and cache the result,
* which remains valid until the context is unpinned.
*
* This is what a descriptor looks like, from LSB to MSB:
* bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
* bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
* bits 52-63: reserved, may encode the engine ID (for GuC)
*/
static void
intel_lr_context_descriptor_update(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
uint64_t lrca, desc;
lrca = ctx->engine[ring->id].lrc_vma->node.start +
LRC_PPHWSP_PN * PAGE_SIZE;
desc = ring->ctx_desc_template; /* bits 0-11 */
desc |= lrca; /* bits 12-31 */
desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
ctx->engine[ring->id].lrc_desc = desc;
}
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
return ctx->engine[ring->id].lrc_desc;
}
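/*
 * A minimal sketch (illustrative, not part of the patch) of pulling the
 * individual fields back out of a cached descriptor, following the
 * layout documented above:
 */
static inline u32 lrc_desc_flags(u64 desc)
{
        return desc & GENMASK_ULL(11, 0);       /* GEN8_CTX_* flags */
}

static inline u64 lrc_desc_lrca(u64 desc)
{
        return desc & GENMASK_ULL(31, 12);      /* GTT address */
}

static inline u32 lrc_desc_ctx_id(u64 desc)
{
        return (desc >> GEN8_CTX_ID_SHIFT) & GENMASK_ULL(19, 0);
}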
/**
* intel_execlists_ctx_id() - get the Execlists Context ID
* @ctx_obj: Logical Ring Context backing object.
* @ctx: Context to get the ID for
* @ring: Engine to get the ID for
*
* Do not confuse with ctx->id! Unfortunately we have a name overload
* here: the old context ID we pass to userspace as a handle so that
@ -273,55 +340,15 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
* ELSP so that the GPU can inform us of the context status via
* interrupts.
*
* The context ID is a portion of the context descriptor, so we can
* just extract the required part from the cached descriptor.
*
* Return: 20-bit globally unique context ID.
*/
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
u32 intel_execlists_ctx_id(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
LRC_PPHWSP_PN * PAGE_SIZE;
/* LRCA is required to be 4K aligned so the more significant 20 bits
* are globally unique */
return lrca >> 12;
}
static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
(ring->id == VCS || ring->id == VCS2);
}
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
LRC_PPHWSP_PN * PAGE_SIZE;
WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
desc = GEN8_CTX_VALID;
desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(ctx_obj->base.dev))
desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= GEN8_CTX_PRIVILEGE;
desc |= lrca;
desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers */
/* desc |= GEN8_CTX_FORCE_RESTORE; */
/* WaEnableForceRestoreInCtxtDescForVCS:skl */
/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
if (disable_lite_restore_wa(ring))
desc |= GEN8_CTX_FORCE_RESTORE;
return desc;
return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
}
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
@ -363,20 +390,10 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
{
struct intel_engine_cs *ring = rq->ring;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
struct page *page;
uint32_t *reg_state;
BUG_ON(!ctx_obj);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start;
if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* True 32b PPGTT with dynamic page allocation: update PDP
@ -390,8 +407,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
kunmap_atomic(reg_state);
return 0;
}
@ -431,9 +446,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
list_add_tail(&req0->execlist_link,
&ring->execlist_retired_req_list);
list_move_tail(&req0->execlist_link,
&ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
@ -478,16 +492,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
execlist_link);
if (head_req != NULL) {
struct drm_i915_gem_object *ctx_obj =
head_req->ctx->engine[ring->id].state;
if (intel_execlists_ctx_id(ctx_obj) == request_id) {
if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
WARN(head_req->elsp_submitted == 0,
"Never submitted head request\n");
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
list_add_tail(&head_req->execlist_link,
&ring->execlist_retired_req_list);
list_move_tail(&head_req->execlist_link,
&ring->execlist_retired_req_list);
return true;
}
}
@ -496,6 +507,19 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
return false;
}
static void get_context_status(struct intel_engine_cs *ring,
u8 read_pointer,
u32 *status, u32 *context_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
return;
*status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
*context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
}
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
@ -516,16 +540,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
read_pointer = ring->next_context_status_buffer;
write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
spin_lock(&ring->execlist_lock);
while (read_pointer < write_pointer) {
read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
&status, &status_id);
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
@ -538,14 +562,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
WARN(1, "Preemption without Lite Restore\n");
}
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
if (execlists_check_remove_request(ring, status_id))
submit_contexts++;
}
}
if (disable_lite_restore_wa(ring)) {
if (ring->disable_lite_restore_wa) {
/* Prevent a ctx to preempt itself */
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
(submit_contexts != 0))
@ -556,13 +580,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
spin_unlock(&ring->execlist_lock);
WARN(submit_contexts > 2, "More than two context complete events?\n");
if (unlikely(submit_contexts > 2))
DRM_ERROR("More than two context complete events?\n");
ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
/* Update the read pointer to the old write pointer. Manual ringbuffer
* management ftw </sarcasm> */
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
_MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
((u32)ring->next_context_status_buffer &
GEN8_CSB_PTR_MASK) << 8));
_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
ring->next_context_status_buffer << 8));
}
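/*
 * Illustration (not part of the patch) of the wrap arithmetic above,
 * with GEN8_CSB_ENTRIES == 6. Suppose read_pointer is 5 and the
 * hardware write pointer reads back as 1:
 *
 *      write_pointer = 1 + GEN8_CSB_ENTRIES;           -> 7
 *      while (read_pointer < write_pointer)            visits 6, 7
 *              ++read_pointer % GEN8_CSB_ENTRIES;      -> slots 0, 1
 *
 * so the two new entries at CSB slots 0 and 1 are consumed, and
 * next_context_status_buffer ends up at 7 % 6 == 1.
 */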
static int execlists_context_queue(struct drm_i915_gem_request *request)
@ -571,7 +598,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
struct drm_i915_gem_request *cursor;
int num_elements = 0;
if (request->ctx != ring->default_context)
if (request->ctx != request->i915->kernel_context)
intel_lr_context_pin(request);
i915_gem_request_reference(request);
@ -592,9 +619,8 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
list_add_tail(&tail_req->execlist_link,
&ring->execlist_retired_req_list);
list_move_tail(&tail_req->execlist_link,
&ring->execlist_retired_req_list);
}
}
@ -660,17 +686,27 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
int ret;
int ret = 0;
request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
if (request->ctx != request->ring->default_context) {
ret = intel_lr_context_pin(request);
if (i915.enable_guc_submission) {
/*
* Check that the GuC has space for the request before
* going any further, as the i915_add_request() call
* later on mustn't fail ...
*/
struct intel_guc *guc = &request->i915->guc;
ret = i915_guc_wq_check_space(guc->execbuf_client);
if (ret)
return ret;
}
return 0;
if (request->ctx != request->i915->kernel_context)
ret = intel_lr_context_pin(request);
return ret;
}
static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
@ -724,23 +760,34 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
static void
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = request->ring;
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct drm_i915_private *dev_priv = request->i915;
intel_logical_ring_advance(request->ringbuf);
intel_logical_ring_advance(ringbuf);
request->tail = ringbuf->tail;
request->tail = request->ringbuf->tail;
/*
* Here we add two extra NOOPs as padding to avoid
* lite restore of a context with HEAD==TAIL.
*
* Caller must reserve WA_TAIL_DWORDS for us!
*/
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
if (intel_ring_stopped(ring))
return;
if (intel_ring_stopped(request->ring))
return 0;
if (dev_priv->guc.execbuf_client)
i915_guc_submit(dev_priv->guc.execbuf_client, request);
else
execlists_context_queue(request);
return 0;
}
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
@ -967,7 +1014,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
if (ctx_obj && (ctx != ring->default_context))
if (ctx_obj && (ctx != req->i915->kernel_context))
intel_lr_context_unpin(req);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
@ -1013,23 +1060,35 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
}
static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj,
struct intel_ringbuffer *ringbuf)
struct intel_context *ctx)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct page *lrc_state_page;
int ret;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
return ret;
lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
if (WARN_ON(!lrc_state_page)) {
ret = -ENODEV;
goto unpin_ctx_obj;
}
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
intel_lr_context_descriptor_update(ctx, ring);
ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page);
ctx_obj->dirty = true;
/* Invalidate GuC TLB. */
@ -1048,11 +1107,9 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
{
int ret = 0;
struct intel_engine_cs *ring = rq->ring;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
if (rq->ctx->engine[ring->id].pin_count++ == 0) {
ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
ret = intel_lr_context_do_pin(ring, rq->ctx);
if (ret)
goto reset_pin_count;
}
@ -1069,12 +1126,18 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (--rq->ctx->engine[ring->id].pin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (!ctx_obj)
return;
if (--rq->ctx->engine[ring->id].pin_count == 0) {
kunmap(kmap_to_page(rq->ctx->engine[ring->id].lrc_reg_state));
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
rq->ctx->engine[ring->id].lrc_vma = NULL;
rq->ctx->engine[ring->id].lrc_desc = 0;
rq->ctx->engine[ring->id].lrc_reg_state = NULL;
}
}
@ -1087,7 +1150,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (WARN_ON_ONCE(w->count == 0))
if (w->count == 0)
return 0;
ring->gpu_caches_dirty = true;
@ -1474,7 +1537,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
u8 next_context_status_buffer_hw;
lrc_setup_hardware_status_page(ring,
ring->default_context->engine[ring->id].state);
dev_priv->kernel_context->engine[ring->id].state);
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@ -1493,9 +1556,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
* | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
* BDW | CSB regs not reset | CSB regs reset |
* CHT | CSB regs not reset | CSB regs not reset |
* SKL | ? | ? |
* BXT | ? | ? |
*/
next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
& GEN8_CSB_PTR_MASK);
next_context_status_buffer_hw =
GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
/*
* When the CSB registers are reset (also after power-up / gpu reset),
@ -1698,7 +1763,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
bool vf_flush_wa;
bool vf_flush_wa = false;
u32 flags = 0;
int ret;
@ -1720,14 +1785,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
/*
* On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
* control.
*/
vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
/*
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
if (IS_GEN9(ring->dev))
vf_flush_wa = true;
}
ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
@ -1791,44 +1856,65 @@ static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
}
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
#define WA_TAIL_DWORDS 2
static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
{
return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
}
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
ret = intel_logical_ring_begin(request, 8);
ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
cmd = MI_STORE_DWORD_IMM_GEN4;
cmd |= MI_GLOBAL_GTT;
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
intel_logical_ring_emit(ringbuf, cmd);
intel_logical_ring_emit(ringbuf,
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_logical_ring_emit(ringbuf,
hws_seqno_address(request->ring) |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(request);
return intel_logical_ring_advance_and_submit(request);
}
/*
* Here we add two extra NOOPs as padding to avoid
* lite restore of a context with HEAD==TAIL.
static int gen8_emit_request_render(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
/* w/a for post sync ops following a GPGPU operation we
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
return 0;
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
intel_logical_ring_emit(ringbuf,
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
return intel_logical_ring_advance_and_submit(request);
}
static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
@ -1911,12 +1997,44 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
ring->status_page.obj = NULL;
}
ring->disable_lite_restore_wa = false;
ring->ctx_desc_template = 0;
lrc_destroy_wa_ctx_obj(ring);
ring->dev = NULL;
}
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
static void
logical_ring_default_vfuncs(struct drm_device *dev,
struct intel_engine_cs *ring)
{
/* Default vfuncs which can be overridden by each engine. */
ring->init_hw = gen8_init_common_ring;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
}
static inline void
logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
{
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
static int
logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
struct intel_context *dctx = to_i915(dev)->kernel_context;
int ret;
/* Intentionally left blank. */
@ -1933,19 +2051,18 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
logical_ring_init_platform_invariants(ring);
ret = i915_cmd_parser_init_ring(ring);
if (ret)
goto error;
ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
ret = intel_lr_context_deferred_alloc(dctx, ring);
if (ret)
goto error;
/* As this is the default context, always pin it */
ret = intel_lr_context_do_pin(
ring,
ring->default_context->engine[ring->id].state,
ring->default_context->engine[ring->id].ringbuf);
ret = intel_lr_context_do_pin(ring, dctx);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
@ -1968,32 +2085,24 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->name = "render ring";
ring->id = RCS;
ring->exec_id = I915_EXEC_RENDER;
ring->mmio_base = RENDER_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
logical_ring_default_vfuncs(dev, ring);
/* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
ring->init_hw = gen9_init_render_ring;
else
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush_render;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
ring->emit_request = gen8_emit_request_render;
ring->dev = dev;
@ -2027,25 +2136,11 @@ static int logical_bsd_ring_init(struct drm_device *dev)
ring->name = "bsd ring";
ring->id = VCS;
ring->exec_id = I915_EXEC_BSD;
ring->mmio_base = GEN6_BSD_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
@ -2055,22 +2150,13 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
ring->name = "bds2 ring";
ring->name = "bsd2 ring";
ring->id = VCS2;
ring->exec_id = I915_EXEC_BSD;
ring->mmio_base = GEN8_BSD2_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
@ -2082,25 +2168,11 @@ static int logical_blt_ring_init(struct drm_device *dev)
ring->name = "blitter ring";
ring->id = BCS;
ring->exec_id = I915_EXEC_BLT;
ring->mmio_base = BLT_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
@ -2112,25 +2184,11 @@ static int logical_vebox_ring_init(struct drm_device *dev)
ring->name = "video enhancement ring";
ring->id = VECS;
ring->exec_id = I915_EXEC_VEBOX;
ring->mmio_base = VEBOX_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
ring->get_seqno = bxt_a_get_seqno;
ring->set_seqno = bxt_a_set_seqno;
} else {
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
}
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
logical_ring_default_vfuncs(dev, ring);
return logical_ring_init(dev, ring);
}
@ -2368,26 +2426,39 @@ void intel_lr_context_free(struct intel_context *ctx)
{
int i;
for (i = 0; i < I915_NUM_RINGS; i++) {
for (i = I915_NUM_RINGS; --i >= 0; ) {
struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
if (ctx_obj) {
struct intel_ringbuffer *ringbuf =
ctx->engine[i].ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
if (!ctx_obj)
continue;
if (ctx == ring->default_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
WARN_ON(ctx->engine[ring->id].pin_count);
intel_ringbuffer_free(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
if (ctx == ctx->i915->kernel_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
WARN_ON(ctx->engine[i].pin_count);
intel_ringbuffer_free(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
}
}
static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
/**
* intel_lr_context_size() - return the size of the context for an engine
* @ring: which engine to find the context size for
*
* Each engine may require a different amount of space for a context image,
* so when allocating (or copying) an image, this function can be used to
* find the right size for the specific engine.
*
* Return: size (in bytes) of an engine-specific context image
*
* Note: this size includes the HWSP, which is part of the context image
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
@ -2444,7 +2515,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
*/
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring)
struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
@ -2455,7 +2526,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
WARN_ON(ctx->engine[ring->id].state);
context_size = round_up(get_lr_context_size(ring), 4096);
context_size = round_up(intel_lr_context_size(ring), 4096);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@ -2481,14 +2552,13 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
if (ctx != ring->default_context && ring->init_context) {
if (ctx != ctx->i915->kernel_context && ring->init_context) {
struct drm_i915_gem_request *req;
ret = i915_gem_request_alloc(ring,
ctx, &req);
if (ret) {
DRM_ERROR("ring create req: %d\n",
ret);
req = i915_gem_request_alloc(ring, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
DRM_ERROR("ring create req: %d\n", ret);
goto error_ringbuf;
}


@ -25,8 +25,6 @@
#define _INTEL_LRC_H_
#define GEN8_LR_CONTEXT_ALIGN 4096
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x07
/* Execlists regs */
#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
@ -40,6 +38,22 @@
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
/* The docs specify that the write pointer wraps around after 5h, "After status
* is written out to the last available status QW at offset 5h, this pointer
* wraps to 0."
*
* Therefore, one must infer that even though there are 3 bits available, 6 and
* 7 appear to be reserved.
*/
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
#define GEN8_CSB_WRITE_PTR(csb_status) \
(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
#define GEN8_CSB_READ_PTR(csb_status) \
(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
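/*
 * Example (illustrative): a status pointer register value of 0x0203
 * splits as GEN8_CSB_WRITE_PTR() == 3 (bits 2:0) and
 * GEN8_CSB_READ_PTR() == 2 (bits 10:8).
 */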
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@ -84,6 +98,7 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
void intel_lr_context_free(struct intel_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct drm_i915_gem_request *req);
@ -92,13 +107,15 @@ void intel_lr_context_reset(struct drm_device *dev,
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring);
u32 intel_execlists_ctx_id(struct intel_context *ctx,
struct intel_engine_cs *ring);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);


@ -240,9 +240,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 4);
if (ret) {
@ -283,9 +283,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
@ -349,9 +349,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 6);
if (ret) {
@ -423,9 +423,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* synchronous slowpath */
struct drm_i915_gem_request *req;
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {


@ -32,6 +32,8 @@
#include <linux/module.h>
/**
* DOC: RC6
*
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This
* stage is entered automatically when the GPU is idle when RC6 support is
@ -1672,6 +1674,9 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (pipe_h < pfit_h)
pipe_h = pfit_h;
if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;
pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
@ -1703,6 +1708,8 @@ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
if (WARN(latency == 0, "Latency value missing\n"))
return UINT_MAX;
if (WARN_ON(!pipe_htotal))
return UINT_MAX;
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
@ -1713,6 +1720,17 @@ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
uint8_t bytes_per_pixel)
{
/*
* Neither of these should be possible since this function shouldn't be
* called if the CRTC is off or the plane is invisible. But let's be
* extra paranoid to avoid a potential divide-by-zero if we screw up
* elsewhere in the driver.
*/
if (WARN_ON(!bytes_per_pixel))
return 0;
if (WARN_ON(!horiz_pixels))
return 0;
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
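/*
 * Worked example (hypothetical numbers): with pri_val = 128 cachelines,
 * horiz_pixels = 1920 and bytes_per_pixel = 4, the FBC watermark is
 * DIV_ROUND_UP(128 * 64, 1920 * 4) + 2 = DIV_ROUND_UP(8192, 7680) + 2
 * = 2 + 2 = 4.
 */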
@ -1998,14 +2016,19 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
hsw_compute_linetime_wm(struct drm_device *dev,
struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode =
&cstate->base.adjusted_mode;
u32 linetime, ips_linetime;
if (!intel_crtc->active)
if (!cstate->base.active)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
if (WARN_ON(dev_priv->cdclk_freq == 0))
return 0;
/* The WM are computed with base on how long it takes to fill a single
@ -2277,6 +2300,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
return PTR_ERR(cstate);
pipe_wm = &cstate->wm.optimal.ilk;
memset(pipe_wm, 0, sizeof(*pipe_wm));
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
ps = drm_atomic_get_plane_state(state,
@ -2313,8 +2337,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev,
&intel_crtc->base);
pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
/* LP0 watermarks always use 1/2 DDB partitioning */
ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
@ -3597,23 +3620,45 @@ static void skl_update_wm(struct drm_crtc *crtc)
dev_priv->wm.skl_hw = *results;
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
static void ilk_compute_wm_config(struct drm_device *dev,
struct intel_wm_config *config)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
for_each_intel_crtc(dev, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
continue;
config->sprites_enabled |= wm->sprites_enabled;
config->sprites_scaled |= wm->sprites_scaled;
config->num_pipes_active++;
}
}
static void ilk_program_watermarks(struct intel_crtc_state *cstate)
{
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config *config = &dev_priv->wm.config;
struct intel_wm_config config = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
ilk_compute_wm_config(dev, &config);
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_INFO(dev)->gen >= 7 &&
config->num_pipes_active == 1 && config->sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
config.num_pipes_active == 1 && config.sprites_enabled) {
ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
} else {
@ -3630,7 +3675,6 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
@ -3650,7 +3694,7 @@ static void ilk_update_wm(struct drm_crtc *crtc)
intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
ilk_program_watermarks(dev_priv);
ilk_program_watermarks(cstate);
}
static void skl_pipe_wm_active_state(uint32_t val,
@ -4036,7 +4080,7 @@ void intel_update_watermarks(struct drm_crtc *crtc)
dev_priv->display.update_wm(crtc);
}
/**
/*
* Lock protecting IPS related data structures
*/
DEFINE_SPINLOCK(mchdev_lock);
@ -4509,13 +4553,13 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
}
if (HAS_RC6p(dev))
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
else
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@ -4693,8 +4737,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
@ -4713,8 +4756,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
if (NEEDS_WaRsDisableCoarsePowerGating(dev))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@ -6981,6 +7023,7 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.program_watermarks = ilk_program_watermarks;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");


@ -1867,15 +1867,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
offset = cs_offset;
}
ret = intel_ring_begin(req, 4);
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
@ -1901,6 +1899,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
return 0;
}
static void cleanup_phys_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
if (!dev_priv->status_page_dmah)
return;
drm_pci_free(ring->dev, dev_priv->status_page_dmah);
ring->status_page.page_addr = NULL;
}
static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@ -1917,9 +1926,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
static int init_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = ring->status_page.obj;
if ((obj = ring->status_page.obj) == NULL) {
if (obj == NULL) {
unsigned flags;
int ret;
@ -1990,6 +1999,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
ringbuf->vma = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
@ -2056,6 +2066,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
}
}
ringbuf->vma = i915_gem_obj_to_ggtt(obj);
return 0;
}
@ -2164,7 +2176,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
} else {
BUG_ON(ring->id != RCS);
WARN_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
goto error;
@ -2210,7 +2222,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
if (ring->cleanup)
ring->cleanup(ring);
cleanup_status_page(ring);
if (I915_NEED_GFX_HWS(ring->dev)) {
cleanup_status_page(ring);
} else {
WARN_ON(ring->id != RCS);
cleanup_phys_status_page(ring);
}
i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
@ -2666,6 +2683,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->name = "render ring";
ring->id = RCS;
ring->exec_id = I915_EXEC_RENDER;
ring->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 8) {
@ -2814,6 +2832,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->name = "bsd ring";
ring->id = VCS;
ring->exec_id = I915_EXEC_BSD;
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
@ -2890,6 +2909,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->name = "bsd2 ring";
ring->id = VCS2;
ring->exec_id = I915_EXEC_BSD;
ring->write_tail = ring_write_tail;
ring->mmio_base = GEN8_BSD2_RING_BASE;
@ -2920,6 +2940,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->name = "blitter ring";
ring->id = BCS;
ring->exec_id = I915_EXEC_BLT;
ring->mmio_base = BLT_RING_BASE;
ring->write_tail = ring_write_tail;
@ -2977,6 +2998,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->name = "video enhancement ring";
ring->id = VECS;
ring->exec_id = I915_EXEC_VEBOX;
ring->mmio_base = VEBOX_RING_BASE;
ring->write_tail = ring_write_tail;


@ -93,11 +93,13 @@ struct intel_ring_hangcheck {
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
u32 instdone[I915_NUM_INSTDONE_REG];
};
struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
struct i915_vma *vma;
struct intel_engine_cs *ring;
struct list_head link;
@ -147,14 +149,15 @@ struct i915_ctx_workarounds {
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
RCS = 0x0,
VCS,
RCS = 0,
BCS,
VECS,
VCS2
VCS,
VCS2, /* Keep instances of the same engine type together. */
VECS
} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
#define _VCS(n) (VCS + (n))
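/*
 * Note (illustrative): with instances of the same engine type kept
 * adjacent in the enum, _VCS(0) == VCS and _VCS(1) == VCS2, i.e.
 * _VCS(n) selects the n-th BSD engine.
 */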
unsigned int exec_id;
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
@ -268,6 +271,8 @@ struct intel_engine_cs {
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
@ -305,7 +310,6 @@ struct intel_engine_cs {
wait_queue_head_t irq_queue;
struct intel_context *default_context;
struct intel_context *last_context;
struct intel_ring_hangcheck hangcheck;
@ -406,7 +410,7 @@ intel_write_status_page(struct intel_engine_cs *ring,
ring->status_page.page_addr[reg] = value;
}
/**
/*
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
@ -423,6 +427,7 @@ intel_write_status_page(struct intel_engine_cs *ring,
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
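/*
 * Example: MI_STORE_DWORD_INDEX_SHIFT (2, per i915_reg.h) converts a
 * dword index into a byte offset, so I915_GEM_HWS_INDEX_ADDR is
 * 0x30 << 2 == 0xc0: the seqno emitted via hws_seqno_address() lands
 * at byte 0xc0 of the status page.
 */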


@ -532,7 +532,8 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
"Platform doesn't support DC5.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
@ -568,7 +569,8 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
"Platform doesn't support DC6.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
@ -595,7 +597,8 @@ static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc5(dev_priv);
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 && i915.enable_dc != 1)
assert_can_disable_dc6(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@ -623,7 +626,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
@ -667,17 +669,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
!I915_READ(HSW_PWR_WELL_BIOS),
"Invalid for power well status to be enabled, unless done by the BIOS, \
when request is to disable!\n");
if (power_well->data == SKL_DISP_PW_2) {
/*
* DDI buffer programming unnecessary during
* driver-load/resume as it's already done
* during modeset initialization then. It's
* also invalid here as encoder list is still
* uninitialized.
*/
if (!dev_priv->power_domains.initializing)
intel_prepare_ddi(dev);
}
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}
@ -783,7 +774,8 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 && i915.enable_dc != 1)
skl_enable_dc6(dev_priv);
else
gen9_enable_dc5(dev_priv);
@ -795,7 +787,8 @@ static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
if (power_well->count > 0) {
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
} else {
if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
i915.enable_dc != 0 &&
i915.enable_dc != 1)
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
else
@ -1851,7 +1844,7 @@ void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;
if (!IS_SKYLAKE(dev_priv))
if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
@ -1865,7 +1858,7 @@ void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;
if (!IS_SKYLAKE(dev_priv))
if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);


@ -24,8 +24,8 @@
* Eric Anholt <eric@anholt.net>
*/
/**
* @file SDVO command definitions and structures.
/*
* SDVO command definitions and structures.
*/
#define SDVO_OUTPUT_FIRST (0)
@ -66,39 +66,39 @@ struct intel_sdvo_caps {
#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
#define DTD_FLAG_INTERLACE (1 << 7)
/** This matches the EDID DTD structure, more or less */
/* This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
u16 clock; /**< pixel clock, in 10kHz units */
u8 h_active; /**< lower 8 bits (pixels) */
u8 h_blank; /**< lower 8 bits (pixels) */
u8 h_high; /**< upper 4 bits each h_active, h_blank */
u8 v_active; /**< lower 8 bits (lines) */
u8 v_blank; /**< lower 8 bits (lines) */
u8 v_high; /**< upper 4 bits each v_active, v_blank */
u16 clock; /* pixel clock, in 10kHz units */
u8 h_active; /* lower 8 bits (pixels) */
u8 h_blank; /* lower 8 bits (pixels) */
u8 h_high; /* upper 4 bits each h_active, h_blank */
u8 v_active; /* lower 8 bits (lines) */
u8 v_blank; /* lower 8 bits (lines) */
u8 v_high; /* upper 4 bits each v_active, v_blank */
} part1;
struct {
u8 h_sync_off; /**< lower 8 bits, from hblank start */
u8 h_sync_width; /**< lower 8 bits (pixels) */
/** lower 4 bits each vsync offset, vsync width */
u8 h_sync_off; /* lower 8 bits, from hblank start */
u8 h_sync_width; /* lower 8 bits (pixels) */
/* lower 4 bits each vsync offset, vsync width */
u8 v_sync_off_width;
/**
/*
* 2 high bits of hsync offset, 2 high bits of hsync width,
* bits 4-5 of vsync offset, and 2 high bits of vsync width.
*/
u8 sync_off_width_high;
u8 dtd_flags;
u8 sdvo_flags;
/** bits 6-7 of vsync offset at bits 6-7 */
/* bits 6-7 of vsync offset at bits 6-7 */
u8 v_sync_off_high;
u8 reserved;
} part2;
} __packed;
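/*
 * A minimal sketch (illustrative, mirroring how the driver decodes
 * these fields elsewhere) of reassembling the full 12-bit horizontal
 * values from the split low/high bytes:
 */
static inline u16 sdvo_dtd_h_active(const struct intel_sdvo_dtd *dtd)
{
        return dtd->part1.h_active | ((dtd->part1.h_high & 0xf0) << 4);
}

static inline u16 sdvo_dtd_h_blank(const struct intel_sdvo_dtd *dtd)
{
        return dtd->part1.h_blank | ((dtd->part1.h_high & 0x0f) << 8);
}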
struct intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
u16 min; /* pixel clock, in 10kHz units */
u16 max; /* pixel clock, in 10kHz units */
} __packed;
struct intel_sdvo_preferred_input_timing_args {
@ -144,7 +144,7 @@ struct intel_sdvo_preferred_input_timing_args {
#define SDVO_CMD_RESET 0x01
/** Returns a struct intel_sdvo_caps */
/* Returns a struct intel_sdvo_caps */
#define SDVO_CMD_GET_DEVICE_CAPS 0x02
#define SDVO_CMD_GET_FIRMWARE_REV 0x86
@ -152,7 +152,7 @@ struct intel_sdvo_preferred_input_timing_args {
# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
/**
/*
* Reports which inputs are trained (managed to sync).
*
* Devices must have trained within 2 vsyncs of a mode change.
@ -164,10 +164,10 @@ struct intel_sdvo_get_trained_inputs_response {
unsigned int pad:6;
} __packed;
/** Returns a struct intel_sdvo_output_flags of active outputs. */
/* Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
/**
/*
* Sets the current set of active outputs.
*
* Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
@ -175,7 +175,7 @@ struct intel_sdvo_get_trained_inputs_response {
*/
#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
/**
/*
* Returns the current mapping of SDVO inputs to outputs on the device.
*
* Returns two struct intel_sdvo_output_flags structures.
@ -185,29 +185,29 @@ struct intel_sdvo_in_out_map {
u16 in0, in1;
};
/**
/*
* Sets the current mapping of SDVO inputs to outputs on the device.
*
* Takes two struct intel_sdvo_output_flags structures.
*/
#define SDVO_CMD_SET_IN_OUT_MAP 0x07
/**
/*
* Returns a struct intel_sdvo_output_flags of attached displays.
*/
#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
/**
/*
* Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
*/
#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
/**
/*
* Takes a struct intel_sdvo_output_flags.
*/
#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
/**
/*
* Returns a struct intel_sdvo_output_flags of displays with hot plug
* interrupts enabled.
*/
@ -221,7 +221,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
unsigned int pad:6;
} __packed;
/**
/*
* Selects which input is affected by future input commands.
*
* Commands affected include SET_INPUT_TIMINGS_PART[12],
@ -234,7 +234,7 @@ struct intel_sdvo_set_target_input_args {
unsigned int pad:7;
} __packed;
/**
/*
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
@ -280,7 +280,7 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
/**
/*
* Generates a DTD based on the given width, height, and flags.
*
* This will be supported by any device supporting scaling or interlaced
@ -300,24 +300,24 @@ struct intel_sdvo_set_target_input_args {
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
/** Returns a struct intel_sdvo_pixel_clock_range */
/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
/** Returns a struct intel_sdvo_pixel_clock_range */
/* Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
/** 6 bytes of bit flags for TV formats shared by all TV format functions */
/* 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
@@ -376,7 +376,7 @@ struct intel_sdvo_tv_format {
#define SDVO_CMD_SET_TV_FORMAT 0x29
/** Returns the resolutions that can be used with the given TV format */
/* Returns the resolutions that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
unsigned int ntsc_m:1;
@@ -539,7 +539,7 @@ struct intel_sdvo_hdtv_resolution_reply {
#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
/**
/*
* The panel power sequencing parameters are in units of milliseconds.
* The high fields are bits 8:9 of the 10-bit values.
*/
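Reassembling one of these 10-bit millisecond values from its split fields is a one-liner; a hedged sketch (the field and helper names are illustrative, not taken from this header):

/* Sketch: fold the 2 high bits (bits 8:9) back onto the low byte of a
 * 10-bit panel power sequencing value, in milliseconds. */
static inline u16 sdvo_pps_ms(u8 low, u8 high2)
{
	return ((u16)(high2 & 0x3) << 8) | low;
}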

drivers/gpu/drm/i915/intel_sprite.c

@@ -178,28 +178,33 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
}
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
skl_update_plane(struct drm_plane *drm_plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(drm_plane->state)->ckey;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
int scaler_id;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
const struct intel_scaler *scaler =
&crtc_state->scaler_state.scalers[plane_state->scaler_id];
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -208,14 +213,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
rotation = drm_plane->state->rotation;
rotation = plane_state->base.rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
fb->pixel_format);
scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -236,9 +239,10 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
if (intel_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
/* stride: Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0], 0);
tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
stride = DIV_ROUND_UP(fb->height, tile_height);
plane_size = (src_w << 16) | src_h;
x_offset = stride * tile_height - y - (src_h + 1);
@@ -256,13 +260,13 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
/* program plane scaler */
if (scaler_id >= 0) {
if (plane_state->scaler_id >= 0) {
uint32_t ps_ctrl = 0;
int scaler_id = plane_state->scaler_id;
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
crtc_state->scaler_state.scalers[scaler_id].mode;
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
@@ -334,24 +338,29 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
}
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
vlv_update_plane(struct drm_plane *dplane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(dplane->state)->ckey;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
sprctl = SP_ENABLE;
@@ -414,14 +423,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
crtc_h--;
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
&x, &y,
obj->tiling_mode,
pixel_size,
fb->pitches[0]);
sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0],
pixel_size,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
x += src_w;
@@ -474,23 +482,28 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
}
static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
ivb_update_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(plane->state)->ckey;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
sprctl = SPRITE_ENABLE;
@@ -544,13 +557,13 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0],
pixel_size,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW do this automagically in hardware */
@@ -612,23 +625,28 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
}
static void
ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
ilk_update_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(plane->state)->ckey;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->dst);
uint32_t crtc_h = drm_rect_height(&plane_state->dst);
uint32_t x = plane_state->src.x1 >> 16;
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
dvscntr = DVS_ENABLE;
@@ -678,13 +696,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_page_offset(dev_priv,
&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
fb->modifier[0],
pixel_size,
fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
@@ -913,30 +931,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
return 0;
}
static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->base.crtc;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
crtc = crtc ? crtc : plane->crtc;
if (state->visible) {
intel_plane->update_plane(plane, crtc, fb,
state->dst.x1, state->dst.y1,
drm_rect_width(&state->dst),
drm_rect_height(&state->dst),
state->src.x1 >> 16,
state->src.y1 >> 16,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16);
} else {
intel_plane->disable_plane(plane, crtc);
}
}
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1118,7 +1112,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,

drivers/gpu/drm/i915/intel_uncore.c

@@ -327,13 +327,54 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
}
}
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
u32 dbg;
dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
return false;
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
return true;
}
static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
u32 cer;
cer = __raw_i915_read32(dev_priv, CLAIM_ER);
if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
return false;
__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
return true;
}
static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
return fpga_check_for_unclaimed_mmio(dev_priv);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_check_for_unclaimed_mmio(dev_priv);
return false;
}
static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_FPGA_DBG_UNCLAIMED(dev))
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* clear out old GT FIFO errors */
if (IS_GEN6(dev) || IS_GEN7(dev))
@@ -585,38 +626,38 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
}
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
i915_reg_t reg, bool read, bool before)
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const i915_reg_t reg,
const bool read,
const bool before)
{
const char *op = read ? "reading" : "writing to";
const char *when = before ? "before" : "after";
if (!i915.mmio_debug)
/* XXX. We limit the auto arming of mmio debug traces
 * on these platforms. They reveal just too many
 * unclaimed accesses, and CI/BAT suffers from the noise.
 * Please fix and then re-enable the automatic traces.
 */
if (i915.mmio_debug < 2 &&
(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
when, op, i915_mmio_reg_offset(reg));
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
if (WARN(check_for_unclaimed_mmio(dev_priv),
"Unclaimed register detected %s %s register 0x%x\n",
before ? "before" : "after",
read ? "reading" : "writing to",
i915_mmio_reg_offset(reg)))
i915.mmio_debug--; /* Only report the first N failures */
}
}
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const i915_reg_t reg,
const bool read,
const bool before)
{
static bool mmio_debug_once = true;
if (i915.mmio_debug || !mmio_debug_once)
if (likely(!i915.mmio_debug))
return;
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
i915.mmio_debug = mmio_debug_once--;
}
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
@@ -664,9 +705,11 @@ __gen2_read(64)
unsigned long irqflags; \
u##x val = 0; \
assert_rpm_wakelock_held(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
unclaimed_reg_debug(dev_priv, reg, true, true)
#define GEN6_READ_FOOTER \
unclaimed_reg_debug(dev_priv, reg, true, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
@ -699,11 +742,9 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (NEEDS_FORCE_WAKE(offset)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
@@ -751,7 +792,6 @@ static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
@@ -765,7 +805,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
GEN6_READ_FOOTER; \
}
@@ -864,9 +903,11 @@ __gen2_write(64)
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(dev_priv); \
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
unclaimed_reg_debug(dev_priv, reg, false, true)
#define GEN6_WRITE_FOOTER \
unclaimed_reg_debug(dev_priv, reg, false, false); \
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
#define __gen6_write(x) \
@@ -892,13 +933,10 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
if (NEEDS_FORCE_WAKE(offset)) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
@@ -928,12 +966,9 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
__raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
@@ -987,7 +1022,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (!SKL_NEEDS_FORCE_WAKE(offset) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
@@ -1002,8 +1036,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
if (fw_engine) \
__force_wake_get(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
hsw_unclaimed_reg_detect(dev_priv); \
GEN6_WRITE_FOOTER; \
}
@@ -1223,6 +1255,8 @@ void intel_uncore_init(struct drm_device *dev)
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
dev_priv->uncore.unclaimed_mmio_check = 1;
switch (INTEL_INFO(dev)->gen) {
default:
case 9:
@@ -1580,13 +1614,26 @@ bool intel_has_gpu_reset(struct drm_device *dev)
return intel_get_gpu_reset(dev) != NULL;
}
void intel_uncore_check_errors(struct drm_device *dev)
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
(__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
DRM_ERROR("Unclaimed register before interrupt\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
return check_for_unclaimed_mmio(dev_priv);
}
bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
if (unlikely(i915.mmio_debug ||
dev_priv->uncore.unclaimed_mmio_check <= 0))
return false;
if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
i915.mmio_debug++;
dev_priv->uncore.unclaimed_mmio_check--;
return true;
}
return false;
}
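For context, a caller is expected to use these two entry points roughly as follows; this is a sketch of the calling convention only, not the driver's actual call site:

/* Hypothetical caller: poll for unclaimed accesses and, on the first
 * hit, the arm helper enables one-shot mmio_debug reporting so the
 * next offending access is traced. */
static void example_poll_unclaimed(struct drm_i915_private *dev_priv)
{
	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed register access; one-shot mmio debug armed\n");
}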

include/uapi/drm/i915_drm.h

@@ -812,10 +812,35 @@ struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
/** Return busy status (1 if busy, 0 if idle).
* The high word is used to indicate on which rings the object
* currently resides:
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
/** Return busy status
*
* A return of 0 implies that the object is idle (after
* having flushed any pending activity), and a non-zero return that
* the object is still in-flight on the GPU. (The GPU has not yet
* signaled completion for all pending requests that reference the
* object.)
*
* The returned dword is split into two fields to indicate both
* the engines on which the object is being read, and the
* engine on which it is currently being written (if any).
*
* The low word (bits 0:15) indicates if the object is being written
* to by any engine (there can only be one, as the GEM implicit
* synchronisation rules force writes to be serialised). Only the
* engine for the last write is reported.
*
* The high word (bits 16:31) is a bitmask of which engines are
* currently reading from the object. Multiple engines may be
* reading from the object simultaneously.
*
* The value of each engine is the same as specified in the
* EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
* Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
* the I915_EXEC_RENDER engine for execution, and so it is never
* reported as active itself. Some hardware may have parallel
* execution engines, e.g. multiple media engines, which are
* mapped to the same identifier in the EXECBUFFER2 ioctl and
* so are not separately reported for busyness.
*/
__u32 busy;
};
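Decoding the new layout from userspace follows directly from the comment above; a minimal sketch using only DRM_IOCTL_I915_GEM_BUSY and the documented field split (the helper function itself is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative helper: query busyness for a GEM handle and split the
 * returned dword into writer and reader fields. */
static int example_print_busy(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy) < 0)
		return -1;

	if (!busy.busy) {
		printf("object %u is idle\n", handle);
	} else {
		uint32_t writer = busy.busy & 0xffff; /* last writing engine, an I915_EXEC_* value */
		uint32_t readers = busy.busy >> 16;   /* bitmask of engines reading the object */

		printf("object %u: writer=%u reader-mask=0x%x\n",
		       handle, writer, readers);
	}
	return 0;
}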

mm/swapfile.c

@@ -48,6 +48,12 @@ static sector_t map_swap_entry(swp_entry_t, struct block_device**);
DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
* Some modules use swappable objects and may try to swap them out under
* memory pressure (via the shrinker). Before doing so, they may wish to
* check to see if any swap space is available.
*/
EXPORT_SYMBOL_GPL(nr_swap_pages);
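A module-side consumer would read the counter through the existing get_nr_swap_pages() accessor in <linux/swap.h>; a hedged sketch of such a check inside a hypothetical shrinker count callback:

#include <linux/swap.h>
#include <linux/shrinker.h>

/* Hypothetical shrinker callback: if no swap space is left, swapping
 * objects out cannot succeed, so report nothing to reclaim. */
static unsigned long example_shrink_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	if (get_nr_swap_pages() <= 0)
		return 0;

	return 42; /* placeholder: count of swappable objects */
}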
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority;