Merge tag 'drm-intel-next-2013-06-18' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Last 3.11 feature pull. I have a few odd bits and pieces and fixes in my
queue; I'll sort them out later to see what's for 3.11-fixes and what's
for 3.12. But nothing that should hold this up, imo.

Highlights:
- more hangcheck work from Mika and Chris to prepare for arb robustness
- trickle feed fixes from Ville
- first parts of the shared pch pll rework, with some basic hw state
  readout and cross-checking (this shuts up the confused pch pll refcount
  WARN that Linus just recently forwarded)
- Haswell audio power well support from Wang Xingchao (alsa bits acked by
  Takashi)
- some cleanups and asserts sprinkling around the plane/gamma enabling
  sequence from Ville
- more gtt refactoring from Ben
- clear up the adjusted mode vs. pixel clock vs. port clock confusion
- 30bpp support, this time for real hopefully

* tag 'drm-intel-next-2013-06-18' of git://people.freedesktop.org/~danvet/drm-intel: (97 commits)
  drm/i915: remove a superflous semi-colon
  drm/i915: Kill useless "Enable panel fitter" comments
  drm/i915: Remove extra "ring" from error message
  drm/i915: simplify the reduced clock handling for pch plls
  drm/i915: stop killing pfit on i9xx
  drm/i915: explicitly set up PIPECONF (and gamma table) on haswell
  drm/i915: set up PIPECONF explicitly for i9xx/vlv platforms
  drm/i915: set up PIPECONF explicitly on ilk-ivb
  drm/i915: find guilty batch buffer on ring resets
  drm/i915: store ring hangcheck action
  drm/i915: add batch bo to i915_add_request()
  drm/i915: change i915_add_request to macro
  drm/i915: add i915_gem_context_get_hang_stats()
  drm/i915: add struct i915_ctx_hang_stats
  drm/i915: Try harder to disable trickle feed on VLV
  drm/i915: fix up pch pll enabling for pixel multipliers
  drm/i915: hw state readout and cross-checking for shared dplls
  drm/i915: WARN on lack of shared dpll
  drm/i915: split up intel_modeset_check_state
  drm/i915: extract readout_hw_state from setup_hw_state
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_fb.c
	drivers/gpu/drm/i915/intel_sdvo.c
commit 28419261b0
Committed by Dave Airlie, 2013-06-28 09:50:34 +10:00
33 changed files with 1808 additions and 890 deletions


@@ -196,6 +196,32 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
} \
} while (0)
struct file_stats {
int count;
size_t total, active, inactive, unbound;
};
static int per_file_stats(int id, void *ptr, void *data)
{
struct drm_i915_gem_object *obj = ptr;
struct file_stats *stats = data;
stats->count++;
stats->total += obj->base.size;
if (obj->gtt_space) {
if (!list_empty(&obj->ring_list))
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
} else {
if (!list_empty(&obj->global_list))
stats->unbound += obj->base.size;
}
return 0;
}
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -204,6 +230,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
u32 count, mappable_count, purgeable_count;
size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
struct drm_file *file;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -215,7 +242,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->mm.object_memory);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.bound_list, gtt_list);
count_objects(&dev_priv->mm.bound_list, global_list);
seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
@@ -230,7 +257,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = purgeable_size = purgeable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
size += obj->base.size, ++count;
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
@@ -238,7 +265,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
++count;
@@ -263,6 +290,21 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->gtt.total,
dev_priv->gtt.mappable_end - dev_priv->gtt.start);
seq_printf(m, "\n");
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
memset(&stats, 0, sizeof(stats));
idr_for_each(&file->object_idr, per_file_stats, &stats);
seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
get_pid_task(file->pid, PIDTYPE_PID)->comm,
stats.count,
stats.total,
stats.active,
stats.inactive,
stats.unbound);
}
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -283,7 +325,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return ret;
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
@@ -1944,7 +1986,8 @@ i915_drop_caches_set(void *data, u64 val)
}
if (val & DROP_UNBOUND) {
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
global_list)
if (obj->pages_pin_count == 0) {
ret = i915_gem_object_put_pages(obj);
if (ret)

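As an aside, the per-file accounting added to the debugfs code above is a self-contained pattern: walk every object a client owns and bucket its size by state. Below is a minimal userspace sketch of that pattern, assuming stand-in object types rather than the real GEM structures, with a plain array replacing the idr walk.

#include <stdio.h>
#include <stddef.h>

struct obj { size_t size; int bound, active; };
struct file_stats { int count; size_t total, active, inactive, unbound; };

/* same shape as per_file_stats() above; returning 0 keeps the walk going */
static int per_file_stats_demo(struct obj *o, struct file_stats *stats)
{
	stats->count++;
	stats->total += o->size;
	if (o->bound) {
		if (o->active)
			stats->active += o->size;
		else
			stats->inactive += o->size;
	} else {
		stats->unbound += o->size;
	}
	return 0;
}

int main(void)
{
	struct obj objs[] = { { 4096, 1, 1 }, { 8192, 1, 0 }, { 4096, 0, 0 } };
	struct file_stats stats = { 0 };
	size_t i;

	for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++)
		per_file_stats_demo(&objs[i], &stats);

	printf("%d objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
	       stats.count, stats.total, stats.active, stats.inactive,
	       stats.unbound);
	return 0;
}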

@@ -1001,8 +1001,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
}
@@ -1633,6 +1632,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
if (HAS_POWER_WELL(dev))
i915_init_power_well(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
@@ -1684,6 +1686,9 @@ int i915_driver_unload(struct drm_device *dev)
intel_gpu_ips_teardown();
if (HAS_POWER_WELL(dev))
i915_remove_power_well(dev);
i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.shrink)
@@ -1775,7 +1780,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv;
DRM_DEBUG_DRIVER("\n");
file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;


@@ -457,7 +457,6 @@ void intel_detect_pch(struct drm_device *dev)
*/
if (INTEL_INFO(dev)->num_pipes == 0) {
dev_priv->pch_type = PCH_NOP;
dev_priv->num_pch_pll = 0;
return;
}
@@ -476,34 +475,28 @@ void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
pci_dev_put(pch);
}
@@ -570,7 +563,7 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_opregion_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, 1);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
return 0;
@@ -614,7 +607,7 @@ void intel_console_resume(struct work_struct *work)
struct drm_device *dev = dev_priv->dev;
console_lock();
intel_fbdev_set_suspend(dev, 0);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
}
@@ -683,7 +676,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
* path of resume if possible.
*/
if (console_trylock()) {
intel_fbdev_set_suspend(dev, 0);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
} else {
schedule_work(&dev_priv->console_resume_work);


@@ -132,15 +132,38 @@ enum hpd_pin {
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
struct intel_pch_pll {
struct drm_i915_private;
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2
struct intel_dpll_hw_state {
uint32_t dpll;
uint32_t fp0;
uint32_t fp1;
};
struct intel_shared_dpll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
int pll_reg;
int fp0_reg;
int fp1_reg;
const char *name;
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
void (*enable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*disable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
};
#define I915_NUM_PLLS 2
/* Used by dp and fdi links */
struct intel_link_m_n {
@@ -195,7 +218,6 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;
struct intel_opregion {
struct opregion_header __iomem *header;
@@ -306,6 +328,8 @@ struct drm_i915_error_state {
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
bool (*fbc_enabled)(struct drm_device *dev);
@@ -313,6 +337,24 @@ struct drm_i915_display_funcs {
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
/**
* find_dpll() - Find the best values for the PLL
* @limit: limits for the PLL
* @crtc: current CRTC
* @target: target frequency in kHz
* @refclk: reference clock frequency in kHz
* @match_clock: if provided, @best_clock P divider must
* match the P divider from @match_clock
* used for LVDS downclocking
* @best_clock: best PLL values found
*
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
struct drm_crtc *crtc,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size,
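The kernel-doc above documents the new find_dpll() vtable hook. As a rough sketch of the dispatch pattern only (the struct definitions below are stand-ins; the real intel_limit and dpll live in the driver headers), a caller interacts purely with the function pointer:

#include <stdbool.h>
#include <stdio.h>

struct dpll { int dot; };                      /* stand-in for the real struct */
struct intel_limit { int dot_min, dot_max; };  /* stand-in limits */

/* one possible strategy behind the hook; real ones search divider tables */
static bool range_find_dpll(const struct intel_limit *limit, int target,
			    int refclk, struct dpll *best_clock)
{
	(void)refclk;
	if (target < limit->dot_min || target > limit->dot_max)
		return false;
	best_clock->dot = target;
	return true;
}

int main(void)
{
	bool (*find_dpll)(const struct intel_limit *, int, int, struct dpll *) =
		range_find_dpll;
	struct intel_limit limit = { 25000, 350000 };
	struct dpll best = { 0 };

	if (find_dpll(&limit, 148500, 96000, &best))
		printf("PLL found for %d kHz\n", best.dot);
	return 0;
}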
@@ -466,6 +508,13 @@ struct i915_hw_ppgtt {
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
struct i915_ctx_hang_stats {
/* This context had batch pending when hang was declared */
unsigned batch_pending;
/* This context had batch active when hang was declared */
unsigned batch_active;
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
@@ -476,6 +525,7 @@ struct i915_hw_context {
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
};
enum no_fbc_reason {
@@ -720,6 +770,15 @@ struct intel_ilk_power_mgmt {
struct drm_i915_gem_object *renderctx;
};
/* Power well structure for haswell */
struct i915_power_well {
struct drm_device *device;
spinlock_t lock;
/* power well enable/disable usage count */
int count;
int i915_request;
};
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
@@ -842,7 +901,6 @@ struct i915_gpu_error {
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
/* For reset and error_state handling. */
spinlock_t lock;
@@ -998,7 +1056,6 @@ typedef struct drm_i915_private {
u32 hpd_event_bits;
struct timer_list hotplug_reenable_timer;
int num_pch_pll;
int num_plane;
unsigned long cfb_size;
@@ -1059,7 +1116,8 @@ typedef struct drm_i915_private {
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
/* Reclocking support */
@@ -1080,6 +1138,9 @@ typedef struct drm_i915_private {
* mchdev_lock in intel_pm.c */
struct intel_ilk_power_mgmt ips;
/* Haswell power well */
struct i915_power_well power_well;
enum no_fbc_reason no_fbc_reason;
struct drm_mm_node *compressed_fb;
@@ -1154,7 +1215,7 @@ struct drm_i915_gem_object {
struct drm_mm_node *gtt_space;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head gtt_list;
struct list_head global_list;
/** This object's place on the active/inactive lists */
struct list_head ring_list;
@@ -1301,12 +1362,18 @@ struct drm_i915_gem_request {
/** GEM sequence number associated with this request. */
uint32_t seqno;
/** Postion in the ringbuffer of the end of the request */
/** Position in the ringbuffer of the start of the request */
u32 head;
/** Position in the ringbuffer of the end of the request */
u32 tail;
/** Context related to this request */
struct i915_hw_context *ctx;
/** Batch buffer related to this request if any */
struct drm_i915_gem_object *batch_obj;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
@@ -1324,6 +1391,8 @@ struct drm_i915_file_private {
struct list_head request_list;
} mm;
struct idr context_idr;
struct i915_ctx_hang_stats hang_stats;
};
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
@@ -1660,6 +1729,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}
@@ -1692,9 +1762,12 @@ void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
u32 *seqno);
int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_object *batch_obj,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
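Pieced together from the hunks in this pull, here is what the macro conversion means for call sites (a runnable sketch with stub types, not a hunk from the commit): plain breadcrumb requests drop one NULL argument, while the execbuffer path keeps the long form so the batch object reaches the request for later hang attribution.

#include <stdio.h>

struct intel_ring_buffer { const char *name; };     /* stub */
struct drm_file;                                    /* opaque, unused here */
struct drm_i915_gem_object { int unused; };         /* stub */
typedef unsigned int u32;

/* stub with the post-series signature from i915_drv.h above */
static int __i915_add_request(struct intel_ring_buffer *ring,
			      struct drm_file *file,
			      struct drm_i915_gem_object *batch_obj,
			      u32 *seqno)
{
	(void)file;
	printf("request on %s, batch %s, seqno %s\n", ring->name,
	       batch_obj ? "tracked" : "none", seqno ? "returned" : "ignored");
	return 0;
}

/* the new convenience macro, exactly as added above */
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)

int main(void)
{
	struct intel_ring_buffer render = { "render ring" };
	struct drm_i915_gem_object bo = { 0 };

	/* old callers such as i915_gem_check_olr() now use the short form: */
	i915_add_request(&render, NULL);

	/* execbuffer passes its batch bo so hangcheck can blame it later: */
	__i915_add_request(&render, NULL, &bo, NULL);
	return 0;
}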
@@ -1748,6 +1821,10 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
kref_put(&ctx->ref, i915_gem_context_free);
}
struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
struct drm_file *file,
u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,

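The new struct i915_power_well above is, at heart, a lock-protected usage count. A toy model of the counting discipline (function names hypothetical; the lock and the actual register writes are elided):

#include <stdio.h>

static int count; /* mirrors i915_power_well.count */

static void power_well_get(void)
{
	if (count++ == 0)
		printf("first user: enabling the power well\n");
}

static void power_well_put(void)
{
	if (--count == 0)
		printf("last user gone: disabling the power well\n");
}

int main(void)
{
	power_well_get();	/* e.g. display */
	power_well_get();	/* e.g. HD audio on Haswell */
	power_well_put();	/* audio done, well stays up for display */
	power_well_put();	/* display done, well powers down */
	return 0;
}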

@@ -176,7 +176,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
@@ -956,7 +956,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
ret = 0;
if (seqno == ring->outstanding_lazy_request)
ret = i915_add_request(ring, NULL, NULL);
ret = i915_add_request(ring, NULL);
return ret;
}
@@ -1676,7 +1676,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
list_del(&obj->gtt_list);
list_del(&obj->global_list);
ops->put_pages(obj);
obj->pages = NULL;
@@ -1696,7 +1696,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
list_for_each_entry_safe(obj, next,
&dev_priv->mm.unbound_list,
gtt_list) {
global_list) {
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
@@ -1733,7 +1733,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
i915_gem_evict_everything(dev_priv->dev);
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
global_list)
i915_gem_object_put_pages(obj);
}
@@ -1858,7 +1859,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (ret)
return ret;
list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
return 0;
}
@@ -1996,17 +1997,18 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
u32 *out_seqno)
int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_object *obj,
u32 *out_seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
u32 request_ring_position, request_start;
int was_empty;
int ret;
request_start = intel_ring_get_tail(ring);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2038,8 +2040,17 @@ i915_add_request(struct intel_ring_buffer *ring,
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->head = request_start;
request->tail = request_ring_position;
request->ctx = ring->last_context;
request->batch_obj = obj;
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
* request is retired will the batch_obj be moved onto the
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
if (request->ctx)
i915_gem_context_reference(request->ctx);
@@ -2096,6 +2107,94 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
{
if (acthd >= obj->gtt_offset &&
acthd < obj->gtt_offset + obj->base.size)
return true;
return false;
}
static bool i915_head_inside_request(const u32 acthd_unmasked,
const u32 request_start,
const u32 request_end)
{
const u32 acthd = acthd_unmasked & HEAD_ADDR;
if (request_start < request_end) {
if (acthd >= request_start && acthd < request_end)
return true;
} else if (request_start > request_end) {
if (acthd >= request_start || acthd < request_end)
return true;
}
return false;
}
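/* Aside, not from the commit: because the ring wraps, a request's
 * [head, tail) interval may straddle the end of the buffer, which is why
 * both orderings are checked above. E.g. with request_start = 0xf000 and
 * request_end = 0x0100, acthd values 0xf800 and 0x0040 both count as
 * inside, while 0x8000 does not. */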
static bool i915_request_guilty(struct drm_i915_gem_request *request,
const u32 acthd, bool *inside)
{
/* There is a possibility that the unmasked head address, which
* points inside the ring, matches the batch_obj address range.
* However this is extremely unlikely.
*/
if (request->batch_obj) {
if (i915_head_inside_object(acthd, request->batch_obj)) {
*inside = true;
return true;
}
}
if (i915_head_inside_request(acthd, request->head, request->tail)) {
*inside = false;
return true;
}
return false;
}
static void i915_set_reset_status(struct intel_ring_buffer *ring,
struct drm_i915_gem_request *request,
u32 acthd)
{
struct i915_ctx_hang_stats *hs = NULL;
bool inside, guilty;
/* Innocent until proven guilty */
guilty = false;
if (ring->hangcheck.action != wait &&
i915_request_guilty(request, acthd, &inside)) {
DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
request->batch_obj ?
request->batch_obj->gtt_offset : 0,
request->ctx ? request->ctx->id : 0,
acthd);
guilty = true;
}
/* If contexts are disabled or this is the default context, use
* file_priv->reset_state
*/
if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
hs = &request->ctx->hang_stats;
else if (request->file_priv)
hs = &request->file_priv->hang_stats;
if (hs) {
if (guilty)
hs->batch_active++;
else
hs->batch_pending++;
}
}
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
list_del(&request->list);
@@ -2110,6 +2209,12 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
u32 completed_seqno;
u32 acthd;
acthd = intel_ring_get_active_head(ring);
completed_seqno = ring->get_seqno(ring, false);
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2117,6 +2222,9 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
list);
if (request->seqno > completed_seqno)
i915_set_reset_status(ring, request, acthd);
i915_gem_free_request(request);
}
@@ -2276,7 +2384,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
idle = true;
for_each_ring(ring, dev_priv, i) {
if (ring->gpu_caches_dirty)
i915_add_request(ring, NULL, NULL);
i915_add_request(ring, NULL);
idle &= list_empty(&ring->request_list);
}
@@ -2508,9 +2616,10 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_gtt_finish_object(obj);
i915_gem_object_unpin_pages(obj);
list_del(&obj->mm_list);
list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
@@ -2918,7 +3027,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
struct drm_i915_gem_object *obj;
int err = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
if (obj->gtt_space == NULL) {
printk(KERN_ERR "object found on GTT list with no space reserved\n");
err++;
@@ -3042,7 +3151,7 @@ search_free:
return ret;
}
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->gtt_space = node;
@@ -3057,7 +3166,6 @@ search_free:
obj->map_and_fenceable = mappable && fenceable;
i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
@@ -3757,7 +3865,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
@@ -3857,7 +3965,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
dev_priv->mm.interruptible = was_interruptible;
}
obj->pages_pin_count = 0;
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
i915_gem_object_release_stolen(obj);
@@ -4498,10 +4612,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
}
cnt = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;


@@ -303,6 +303,34 @@ static int context_idr_cleanup(int id, void *p, void *data)
return 0;
}
struct i915_ctx_hang_stats *
i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
struct drm_file *file,
u32 id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *to;
if (dev_priv->hw_contexts_disabled)
return ERR_PTR(-ENOENT);
if (ring->id != RCS)
return ERR_PTR(-EINVAL);
if (file == NULL)
return ERR_PTR(-EINVAL);
if (id == DEFAULT_CONTEXT_ID)
return &file_priv->hang_stats;
to = i915_gem_context_get(file->driver_priv, id);
if (to == NULL)
return ERR_PTR(-ENOENT);
return &to->hang_stats;
}
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -427,7 +455,7 @@ static int do_switch(struct i915_hw_context *to)
from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring);
ret = i915_add_request(ring, NULL, NULL);
ret = i915_add_request(ring, NULL);
if (ret) {
/* Too late, we've already scheduled a context switch.
* Try to undo the change so that the hw state is


@@ -786,7 +786,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj);
intel_mark_fb_busy(obj, ring);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
@@ -796,13 +796,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring)
struct intel_ring_buffer *ring,
struct drm_i915_gem_object *obj)
{
/* Unconditionally force add_request to emit a full flush. */
ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
(void)i915_add_request(ring, file, NULL);
(void)__i915_add_request(ring, file, obj, NULL);
}
static int
@@ -1083,7 +1084,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
eb_destroy(eb);


@@ -439,7 +439,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
dev_priv->gtt.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
@@ -631,7 +631,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
obj->gtt_offset, obj->base.size);


@@ -279,7 +279,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
goto cleanup;
obj->has_dma_mapping = true;
obj->pages_pin_count = 1;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
@@ -383,7 +383,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj->gtt_offset = gtt_offset;
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
return obj;


@@ -683,7 +683,6 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
dev_priv->gpu_error.hangcheck_count = 0;
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
@@ -1656,7 +1655,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
struct drm_i915_gem_object *obj;
int i = 0;
list_for_each_entry(obj, head, gtt_list) {
list_for_each_entry(obj, head, global_list) {
if (obj->pin_count == 0)
continue;
@@ -1798,7 +1797,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
if (ring->id != RCS || !error->ccid)
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1);
@@ -1935,7 +1934,7 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
@@ -2315,38 +2314,28 @@ ring_last_seqno(struct intel_ring_buffer *ring)
struct drm_i915_gem_request, list)->seqno;
}
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring,
u32 ring_seqno, bool *err)
static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
if (list_empty(&ring->request_list) ||
i915_seqno_passed(ring_seqno, ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
if (waitqueue_active(&ring->irq_queue)) {
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
ring->name);
wake_up_all(&ring->irq_queue);
*err = true;
}
return true;
}
return false;
return (list_empty(&ring->request_list) ||
i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
static bool semaphore_passed(struct intel_ring_buffer *ring)
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
struct intel_ring_buffer *signaller;
u32 cmd, ipehr, acthd_min;
u32 cmd, ipehr, acthd, acthd_min;
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if ((ipehr & ~(0x3 << 16)) !=
(MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
return false;
return NULL;
/* ACTHD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX.
*/
acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
acthd_min = max((int)acthd - 3 * 4, 0);
do {
cmd = ioread32(ring->virtual_start + acthd);
@@ -2355,128 +2344,216 @@ static bool semaphore_passed(struct intel_ring_buffer *ring)
acthd -= 4;
if (acthd < acthd_min)
return false;
return NULL;
} while (1);
signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
return i915_seqno_passed(signaller->get_seqno(signaller, false),
ioread32(ring->virtual_start+acthd+4)+1);
*seqno = ioread32(ring->virtual_start+acthd+4)+1;
return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
static bool kick_ring(struct intel_ring_buffer *ring)
static int semaphore_passed(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ring_buffer *signaller;
u32 seqno, ctl;
ring->hangcheck.deadlock = true;
signaller = semaphore_waits_for(ring, &seqno);
if (signaller == NULL || signaller->hangcheck.deadlock)
return -1;
/* cursory check for an unkickable deadlock */
ctl = I915_READ_CTL(signaller);
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
return -1;
return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}
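/* Aside, not from the commit: semaphore_passed() is effectively
 * tri-state. -1 flags an unkickable (possibly circular) semaphore
 * deadlock, 0 means the signaller has not yet reached the awaited seqno
 * (a legitimate wait), and 1 means the wait should already have been
 * satisfied, so the waiter is a candidate for kicking. */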
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
struct intel_ring_buffer *ring;
int i;
for_each_ring(ring, dev_priv, i)
ring->hangcheck.deadlock = false;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp = I915_READ_CTL(ring);
if (tmp & RING_WAIT) {
DRM_ERROR("Kicking stuck wait on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
return true;
}
u32 tmp;
if (INTEL_INFO(dev)->gen >= 6 &&
tmp & RING_WAIT_SEMAPHORE &&
semaphore_passed(ring)) {
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
return true;
}
return false;
}
if (ring->hangcheck.acthd != acthd)
return active;
static bool i915_hangcheck_ring_hung(struct intel_ring_buffer *ring)
{
if (IS_GEN2(ring->dev))
return false;
if (IS_GEN2(dev))
return hung;
/* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit
* and break the hang. This should work on
* all but the second generation chipsets.
*/
return !kick_ring(ring);
}
static bool i915_hangcheck_hung(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->gpu_error.hangcheck_count++ > 1) {
bool hung = true;
struct intel_ring_buffer *ring;
int i;
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
i915_handle_error(dev, true);
for_each_ring(ring, dev_priv, i)
hung &= i915_hangcheck_ring_hung(ring);
return hung;
tmp = I915_READ_CTL(ring);
if (tmp & RING_WAIT) {
DRM_ERROR("Kicking stuck wait on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
return kick;
}
return false;
if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(ring)) {
default:
return hung;
case 1:
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
return kick;
case 0:
return wait;
}
}
return hung;
}
/**
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. The first time this is called we simply record
* ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
* again, we assume the chip is wedged and try to fix it.
* batchbuffers in a long time. We keep track of per-ring seqno progress and,
* if there is no progress, the hangcheck score for that ring is increased.
* Further, acthd is inspected to see if the ring is stuck. If it is, we
* kick the ring. If we see no progress on three subsequent calls
* we assume the chip is wedged and try to fix it by resetting the chip.
*/
void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
bool err = false, idle;
int i;
u32 seqno[I915_NUM_RINGS];
bool work_done;
int busy_count = 0, rings_hung = 0;
bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30
if (!i915_enable_hangcheck)
return;
idle = true;
for_each_ring(ring, dev_priv, i) {
seqno[i] = ring->get_seqno(ring, false);
idle &= i915_hangcheck_ring_idle(ring, seqno[i], &err);
}
u32 seqno, acthd;
bool busy = true;
/* If all work is done then ACTHD clearly hasn't advanced. */
if (idle) {
if (err) {
if (i915_hangcheck_hung(dev))
return;
semaphore_clear_deadlocks(dev_priv);
goto repeat;
seqno = ring->get_seqno(ring, false);
acthd = intel_ring_get_active_head(ring);
if (ring->hangcheck.seqno == seqno) {
if (ring_idle(ring, seqno)) {
if (waitqueue_active(&ring->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
ring->name);
wake_up_all(&ring->irq_queue);
ring->hangcheck.score += HUNG;
} else
busy = false;
} else {
int score;
/* We always increment the hangcheck score
* if the ring is busy and still processing
* the same request, so that no single request
* can run indefinitely (such as a chain of
* batches). The only time we do not increment
* the hangcheck score on this ring is if this
* ring is in a legitimate wait for another
* ring. In that case the waiting ring is a
* victim and we want to be sure we catch the
* right culprit. Then every time we do kick
* the ring, add a small increment to the
* score so that we can catch a batch that is
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
ring->hangcheck.action = ring_stuck(ring,
acthd);
switch (ring->hangcheck.action) {
case wait:
score = 0;
break;
case active:
score = BUSY;
break;
case kick:
score = KICK;
break;
case hung:
score = HUNG;
stuck[i] = true;
break;
}
ring->hangcheck.score += score;
}
} else {
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
}
dev_priv->gpu_error.hangcheck_count = 0;
ring->hangcheck.seqno = seqno;
ring->hangcheck.acthd = acthd;
busy_count += busy;
}
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.score > FIRE) {
DRM_ERROR("%s on %s\n",
stuck[i] ? "stuck" : "no progress",
ring->name);
rings_hung++;
}
}
if (rings_hung)
return i915_handle_error(dev, true);
if (busy_count)
/* Reset timer in case the chip hangs without another request
* being added */
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies +
DRM_I915_HANGCHECK_JIFFIES));
}
work_done = false;
for_each_ring(ring, dev_priv, i) {
if (ring->hangcheck.seqno != seqno[i]) {
work_done = true;
ring->hangcheck.seqno = seqno[i];
}
}
if (!work_done) {
if (i915_hangcheck_hung(dev))
return;
} else {
dev_priv->gpu_error.hangcheck_count = 0;
}
repeat:
/* Reset timer case chip hangs without another request being added */
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
static void ibx_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_PCH_NOP(dev))
return;
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
/*
* SDEIER is also touched by the interrupt handler to work around missed
* PCH interrupts. Hence we can't update it after the interrupt handler
* is enabled - instead we unconditionally enable all PCH interrupt
* sources here, but then only unmask them as needed with SDEIMR.
*/
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
}
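To make the new scoring concrete, a userspace toy model with the constants copied from the hunk above; the per-tick hangcheck verdicts are simulated here rather than read from hardware:

#include <stdio.h>

#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

/* count how many hangcheck periods a ring needs to cross the FIRE line
 * if every tick yields the same per-tick score increment */
static int ticks_to_fire(int per_tick)
{
	int score = 0, ticks = 0;

	while (score <= FIRE) {
		score += per_tick;
		ticks++;
	}
	return ticks;
}

int main(void)
{
	printf("truly hung ring fires after %d periods\n", ticks_to_fire(HUNG));
	printf("repeatedly kicked ring fires after %d periods\n",
	       ticks_to_fire(KICK));
	printf("busy ring on one request would need %d periods\n",
	       ticks_to_fire(BUSY));
	return 0;
}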
/* drm_dma.h hooks
@@ -2500,16 +2577,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
/*
* SDEIER is also touched by the interrupt handler to work around missed
* PCH interrupts. Hence we can't update it after the interrupt handler
* is enabled - instead we unconditionally enable all PCH interrupt
* sources here, but then only unmask them as needed with SDEIMR.
*/
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
ibx_irq_preinstall(dev);
}
static void ivybridge_irq_preinstall(struct drm_device *dev)
@@ -2536,19 +2604,7 @@ static void ivybridge_irq_preinstall(struct drm_device *dev)
I915_WRITE(GEN6_PMIER, 0x0);
POSTING_READ(GEN6_PMIER);
if (HAS_PCH_NOP(dev))
return;
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
/*
* SDEIER is also touched by the interrupt handler to work around missed
* PCH interrupts. Hence we can't update it after the interrupt handler
* is enabled - instead we unconditionally enable all PCH interrupt
* sources here, but then only unmask them as needed with SDEIMR.
*/
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
ibx_irq_preinstall(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)


@@ -147,15 +147,9 @@
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)
/*
* SR01 is the only VGA register touched on non-UMS setups.
* VLV doesn't do UMS, so the sequencer index/data registers
* are the only VGA registers which need to include
* display_mmio_offset.
*/
#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
#define VGA_SR_INDEX 0x3c4
#define SR01 1
#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
#define VGA_SR_DATA 0x3c5
#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
@@ -1026,6 +1020,10 @@
#define IPS_CTL 0x43408
#define IPS_ENABLE (1 << 31)
#define MSG_FBC_REND_STATE 0x50380
#define FBC_REND_NUKE (1<<2)
#define FBC_REND_CACHE_CLEAN (1<<1)
#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
#define HSW_BYPASS_FBC_QUEUE (1<<22)
@@ -1256,7 +1254,7 @@
#define DSTATE_PLL_D3_OFF (1<<3)
#define DSTATE_GFX_CLOCK_GATING (1<<1)
#define DSTATE_DOT_CLOCK_GATING (1<<0)
#define DSPCLK_GATE_D 0x6200
#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -1369,6 +1367,8 @@
#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)
#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
/*
* Palette regs
*/
@@ -3672,9 +3672,9 @@
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
#define GAMMA_MODE_MODE_MASK (3 << 0)
#define GAMMA_MODE_MODE_8bit (0 << 0)
#define GAMMA_MODE_MODE_10bit (1 << 0)
#define GAMMA_MODE_MODE_12bit (2 << 0)
#define GAMMA_MODE_MODE_8BIT (0 << 0)
#define GAMMA_MODE_MODE_10BIT (1 << 0)
#define GAMMA_MODE_MODE_12BIT (2 << 0)
#define GAMMA_MODE_MODE_SPLIT (3 << 0)
/* interrupts */
@@ -3932,15 +3932,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -3980,15 +3980,9 @@
#define PCH_SSC4_AUX_PARMS 0xc6214
#define PCH_DPLL_SEL 0xc7000
#define TRANSA_DPLL_ENABLE (1<<3)
#define TRANSA_DPLLB_SEL (1<<0)
#define TRANSA_DPLLA_SEL 0
#define TRANSB_DPLL_ENABLE (1<<7)
#define TRANSB_DPLLB_SEL (1<<4)
#define TRANSB_DPLLA_SEL (0)
#define TRANSC_DPLL_ENABLE (1<<11)
#define TRANSC_DPLLB_SEL (1<<8)
#define TRANSC_DPLLA_SEL (0)
#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4))
#define TRANS_DPLLA_SEL(pipe) 0
#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3))
/* transcoder */
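A quick standalone check that the parameterized TRANS_DPLL macros above (parenthesized here for safety) reproduce the old per-transcoder bit positions:

#include <assert.h>

#define TRANS_DPLLB_SEL(pipe)   (1 << ((pipe) * 4))
#define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3))

int main(void)
{
	assert(TRANS_DPLL_ENABLE(0) == (1 << 3));  /* TRANSA_DPLL_ENABLE */
	assert(TRANS_DPLLB_SEL(1)   == (1 << 4));  /* TRANSB_DPLLB_SEL */
	assert(TRANS_DPLLB_SEL(2)   == (1 << 8));  /* TRANSC_DPLLB_SEL */
	assert(TRANS_DPLL_ENABLE(2) == (1 << 11)); /* TRANSC_DPLL_ENABLE */
	return 0;
}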


@@ -41,7 +41,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
return false;
if (HAS_PCH_SPLIT(dev))
dpll_reg = _PCH_DPLL(pipe);
dpll_reg = PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;


@@ -624,7 +624,7 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */,
clock, *p_out, *n2_out, *r2_out);
}
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
@@ -634,6 +634,7 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
int type = intel_encoder->type;
enum pipe pipe = intel_crtc->pipe;
uint32_t reg, val;
int clock = intel_crtc->config.port_clock;
/* TODO: reuse PLLs when possible (compare values) */
@@ -1278,7 +1279,6 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
pipe_config->pixel_multiplier = 1;
}
static void intel_ddi_destroy(struct drm_encoder *encoder)

[diff for one file suppressed because it is too large]


@@ -677,7 +677,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
int target_clock, link_avail, link_clock;
int link_avail, link_clock;
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -694,8 +694,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
}
/* We need to take the panel's fixed mode into account. */
target_clock = adjusted_mode->clock;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
@@ -706,12 +704,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
bpp = pipe_config->pipe_bpp;
if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(target_clock, bpp);
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -746,18 +744,17 @@ found:
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
pipe_config->pipe_bpp = bpp;
pipe_config->pixel_target_clock = target_clock;
pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock, bpp);
pipe_config->port_clock, bpp);
DRM_DEBUG_KMS("DP link bw required %i available %i\n",
mode_rate, link_avail);
intel_link_compute_m_n(bpp, lane_count,
target_clock, adjusted_mode->clock,
adjusted_mode->clock, pipe_config->port_clock,
&pipe_config->dp_m_n);
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -780,24 +777,28 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
}
}
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = crtc->dev;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
if (clock < 200000) {
if (crtc->config.port_clock == 162000) {
/* For a long time we've carried around an ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
dpa_ctl |= DP_PLL_FREQ_160MHZ;
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
} else {
dpa_ctl |= DP_PLL_FREQ_270MHZ;
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
I915_WRITE(DP_A, dpa_ctl);
@@ -814,8 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
/*
* There are four kinds of DP registers:
@@ -845,7 +845,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
@@ -864,13 +864,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= intel_crtc->pipe << 29;
/* don't miss out required setting for eDP */
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
intel_dp->DP |= crtc->pipe << 29;
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
intel_dp->DP |= intel_dp->color_range;
@@ -884,22 +878,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;
if (intel_crtc->pipe == 1)
if (crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
if (port == PORT_A && !IS_VALLEYVIEW(dev)) {
/* don't miss out required setting for eDP */
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
if (port == PORT_A && !IS_VALLEYVIEW(dev))
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
ironlake_set_pll_cpu_edp(intel_dp);
}
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
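For context on the port_clock values above: DP bandwidth codes map to link rates in kHz, with the factor of 27000 matching the drm_dp_bw_code_to_link_rate() helper, and 162000 is exactly the case that triggers the 160MHz eDP PLL workaround. A small standalone check:

#include <stdio.h>

#define DP_LINK_BW_1_62 0x06
#define DP_LINK_BW_2_7  0x0a

static int bw_code_to_link_rate(int link_bw) /* kHz, like the drm helper */
{
	return link_bw * 27000;
}

int main(void)
{
	int codes[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
	int i;

	for (i = 0; i < 2; i++) {
		int port_clock = bw_code_to_link_rate(codes[i]);
		printf("bw code 0x%02x -> port_clock %d kHz -> %s PLL\n",
		       codes[i], port_clock,
		       port_clock == 162000 ? "160MHz" : "270MHz");
	}
	return 0;
}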


@@ -140,7 +140,8 @@ struct intel_encoder {
* it is connected to in the pipe parameter. */
bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
/* Reconstructs the equivalent mode flags for the current hardware
* state. */
* state. This must be called _after_ display->get_pipe_config has
* pre-filled the pipe config. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_config *pipe_config);
int crtc_mask;
@@ -193,6 +194,17 @@ typedef struct dpll {
} intel_clock_t;
struct intel_crtc_config {
/**
* quirks - bitfield with hw state readout quirks
*
* For various reasons the hw state readout code might not be able to
* completely faithfully read out the current state. These cases are
* tracked with quirk flags so that fastboot and the state checker can act
* accordingly.
*/
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
struct drm_display_mode requested_mode;
struct drm_display_mode adjusted_mode;
/* This flag must be set by the encoder's compute_config callback if it
@@ -241,14 +253,21 @@ struct intel_crtc_config {
* haswell. */
struct dpll dpll;
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
int pipe_bpp;
struct intel_link_m_n dp_m_n;
/**
* This is currently used by DP and HDMI encoders since those can have a
* target pixel clock != the port link clock (which is currently stored
* in adjusted_mode->clock).
/*
* Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode.
*/
int pixel_target_clock;
int port_clock;
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
@@ -304,8 +323,6 @@ struct intel_crtc {
struct intel_crtc_config config;
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
/* reset counter value when the last flip was submitted */
@@ -562,9 +579,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring);
extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
@@ -628,11 +646,11 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
extern void intel_plane_restore(struct drm_plane *plane);
extern void intel_plane_disable(struct drm_plane *plane);
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@ -767,6 +785,10 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
/* Power well */
extern int i915_init_power_well(struct drm_device *dev);
extern void i915_remove_power_well(struct drm_device *dev);
extern bool intel_display_power_enabled(struct drm_device *dev,
enum intel_display_power_domain domain);
extern void intel_init_power_well(struct drm_device *dev);
@@ -786,7 +808,7 @@ extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
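The quirks bitfield documented in the intel_crtc_config hunk above follows the usual flag-word pattern. A minimal sketch with the flag value taken from the hunk (the config struct is a stand-in):

#include <stdio.h>

#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1UL << 0)

struct sketch_pipe_config { unsigned long quirks; };

int main(void)
{
	struct sketch_pipe_config cfg = { 0 };

	/* hw readout could not recover reliable sync flags... */
	cfg.quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;

	/* ...so the state checker knows to skip comparing mode.flags: */
	if (cfg.quirks & PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)
		printf("skipping sync-flag cross-check\n");
	return 0;
}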


@@ -60,8 +60,9 @@ static struct fb_ops intelfb_ops = {
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
struct drm_device *dev = ifbdev->helper.dev;
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
@@ -108,7 +109,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
info->par = ifbdev;
info->par = helper;
ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
@@ -217,7 +218,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -242,7 +243,7 @@ int intel_fbdev_init(struct drm_device *dev)
void intel_fbdev_initial_config(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
/* Due to peculiar init order wrt to hpd handling this is separate. */
drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
@@ -250,7 +251,7 @@ void intel_fbdev_initial_config(struct drm_device *dev)
void intel_fbdev_fini(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->fbdev)
return;
@@ -261,7 +262,7 @@ void intel_fbdev_fini(struct drm_device *dev)
void intel_fbdev_set_suspend(struct drm_device *dev, int state)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct fb_info *info;
@@ -274,7 +275,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
*/
if (!state && ifbdev->ifb.obj->stolen)
if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen)
memset_io(info->screen_base, 0, info->screen_size);
fb_set_suspend(info, state);
@@ -284,14 +285,14 @@ MODULE_LICENSE("GPL and additional rights");
void intel_fb_output_poll_changed(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
void intel_fb_restore_mode(struct drm_device *dev)
{
int ret;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->num_pipes == 0)
return;


@@ -835,9 +835,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
desired_bpp = 12*3;
/* Need to adjust the port link by 1.5x for 12bpc. */
adjusted_mode->clock = clock_12bpc;
pipe_config->pixel_target_clock =
pipe_config->requested_mode.clock;
pipe_config->port_clock = clock_12bpc;
} else {
DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
desired_bpp = 8*3;


@ -264,9 +264,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return false;
}
if (intel_encoder_check_is_cloned(&lvds_encoder->base))
return false;
if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
LVDS_A3_POWER_UP)
lvds_bpp = 8*3;
@ -880,7 +877,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
bool intel_lvds_init(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder;
@ -898,35 +895,35 @@ bool intel_lvds_init(struct drm_device *dev)
u8 pin;
if (!intel_lvds_supported(dev))
return false;
return;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return false;
return;
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return false;
return;
}
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return false;
return;
if (dev_priv->vbt.edp_support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return false;
return;
}
}
lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return false;
return;
lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
if (!lvds_connector) {
kfree(lvds_encoder);
return false;
return;
}
lvds_encoder->attached_connector = lvds_connector;
@ -1097,7 +1094,7 @@ out:
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
return true;
return;
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@ -1107,5 +1104,5 @@ failed:
drm_mode_destroy(dev, fixed_mode);
kfree(lvds_encoder);
kfree(lvds_connector);
return false;
return;
}


@ -312,7 +312,7 @@ static void intel_didl_outputs(struct drm_device *dev)
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
if (i >= 8) {
dev_printk(KERN_ERR, &dev->pdev->dev,
"More than 8 outputs detected\n");
"More than 8 outputs detected via ACPI\n");
return;
}
status =
@ -339,7 +339,7 @@ blind_set:
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
dev_printk(KERN_ERR, &dev->pdev->dev,
"More than 8 outputs detected\n");
"More than 8 outputs in connector list\n");
return;
}
switch (connector->connector_type) {


@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
ret = i915_add_request(ring, &overlay->last_flip_req);
if (ret)
return ret;
@ -286,7 +286,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
return i915_add_request(ring, NULL, &overlay->last_flip_req);
return i915_add_request(ring, &overlay->last_flip_req);
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)

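For context, the overlay call sites above lose their middle argument because i915_add_request() became a convenience macro earlier in this series ("drm/i915: change i915_add_request to macro" in the shortlog). A sketch of the assumed wrapper shape — the real definition lives in i915_drv.h, outside this diff:

/* Assumed shape: callers without a file/batch context pass NULL through
* to the full __i915_add_request() entry point. */
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)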

@ -274,7 +274,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
@ -431,7 +431,7 @@ void intel_disable_fbc(struct drm_device *dev)
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= 2048 in width, 1536 in height
* - framebuffer <= max_hdisplay in width, max_vdisplay in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
@ -449,6 +449,7 @@ void intel_update_fbc(struct drm_device *dev)
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int enable_fbc;
unsigned int max_hdisplay, max_vdisplay;
if (!i915_powersave)
return;
@ -507,8 +508,16 @@ void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
if ((crtc->mode.hdisplay > 2048) ||
(crtc->mode.vdisplay > 1536)) {
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_hdisplay = 4096;
max_vdisplay = 2048;
} else {
max_hdisplay = 2048;
max_vdisplay = 1536;
}
if ((crtc->mode.hdisplay > max_hdisplay) ||
(crtc->mode.vdisplay > max_vdisplay)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
@ -2078,10 +2087,7 @@ static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate, pfit_size;
if (intel_crtc->config.pixel_target_clock)
pixel_rate = intel_crtc->config.pixel_target_clock;
else
pixel_rate = intel_crtc->config.adjusted_mode.clock;
pixel_rate = intel_crtc->config.adjusted_mode.clock;
/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
* adjust the pixel_rate here. */
@ -4381,6 +4387,19 @@ static void ibx_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -4444,6 +4463,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
g4x_disable_trickle_feed(dev);
ibx_init_clock_gating(dev);
}
@ -4498,7 +4519,6 @@ static void gen6_check_mch_setup(struct drm_device *dev)
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@ -4574,12 +4594,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
g4x_disable_trickle_feed(dev);
/* The default value should be 0x200 according to docs, but the two
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
@ -4640,7 +4655,6 @@ static void lpt_suspend_hw(struct drm_device *dev)
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@ -4666,12 +4680,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
g4x_disable_trickle_feed(dev);
/* WaVSRefCountFullforceMissDisable:hsw */
gen7_setup_fixed_func_scheduler(dev_priv);
@ -4697,7 +4706,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t snpcr;
I915_WRITE(WM3_LP_ILK, 0);
@ -4766,12 +4774,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
g4x_disable_trickle_feed(dev);
/* WaMbcDriverBootEnable:ivb */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
@ -4798,13 +4801,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@ -4875,12 +4873,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
@ -4922,6 +4915,8 @@ static void g4x_init_clock_gating(struct drm_device *dev)
/* WaDisableRenderCachePipelinedFlush */
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
g4x_disable_trickle_feed(dev);
}
static void crestline_init_clock_gating(struct drm_device *dev)
@ -4933,6 +4928,8 @@ static void crestline_init_clock_gating(struct drm_device *dev)
I915_WRITE(DSPCLK_GATE_D, 0);
I915_WRITE(RAMCLK_GATE_D, 0);
I915_WRITE16(DEUC, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void broadwater_init_clock_gating(struct drm_device *dev)
@ -4945,6 +4942,8 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void gen3_init_clock_gating(struct drm_device *dev)
@ -5022,18 +5021,12 @@ bool intel_display_power_enabled(struct drm_device *dev,
}
}
void intel_set_power_well(struct drm_device *dev, bool enable)
static void __intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
uint32_t tmp;
if (!HAS_POWER_WELL(dev))
return;
if (!i915_disable_power_well && !enable)
return;
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE;
enable_requested = tmp & HSW_PWR_WELL_ENABLE;
@ -5056,6 +5049,79 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
}
}
static struct i915_power_well *hsw_pwr;
/* Display audio driver power well request */
void i915_request_power_well(void)
{
if (WARN_ON(!hsw_pwr))
return;
spin_lock_irq(&hsw_pwr->lock);
if (!hsw_pwr->count++ &&
!hsw_pwr->i915_request)
__intel_set_power_well(hsw_pwr->device, true);
spin_unlock_irq(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
void i915_release_power_well(void)
{
if (WARN_ON(!hsw_pwr))
return;
spin_lock_irq(&hsw_pwr->lock);
WARN_ON(!hsw_pwr->count);
if (!--hsw_pwr->count &&
!hsw_pwr->i915_request)
__intel_set_power_well(hsw_pwr->device, false);
spin_unlock_irq(&hsw_pwr->lock);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
int i915_init_power_well(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
hsw_pwr = &dev_priv->power_well;
hsw_pwr->device = dev;
spin_lock_init(&hsw_pwr->lock);
hsw_pwr->count = 0;
return 0;
}
void i915_remove_power_well(struct drm_device *dev)
{
hsw_pwr = NULL;
}
void intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_power_well *power_well = &dev_priv->power_well;
if (!HAS_POWER_WELL(dev))
return;
if (!i915_disable_power_well && !enable)
return;
spin_lock_irq(&power_well->lock);
power_well->i915_request = enable;
/* only reject a "disable" request while the well is still in use */
if (power_well->count && !enable) {
spin_unlock_irq(&power_well->lock);
return;
}
__intel_set_power_well(dev, enable);
spin_unlock_irq(&power_well->lock);
}
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well

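The exported pair above is refcounted: the first i915_request_power_well() powers the well up (unless i915 already holds it via intel_set_power_well()), and the last i915_release_power_well() lets it power back down. A minimal consumer-side sketch — the surrounding function names are hypothetical, only the two exported symbols are real:

#include <drm/i915_powerwell.h>

/* Hypothetical consumer: bracket display-audio register access with a
* request/release pair so the well stays powered in between. */
static void my_hda_begin_access(void)
{
i915_request_power_well();	/* first request powers the well up */
}

static void my_hda_end_access(void)
{
i915_release_power_well();	/* last release lets it power down */
}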

@ -280,6 +280,27 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
return 0;
}
static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
int ret;
if (!ring->fbc_dirty)
return 0;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
/* WaFbcNukeOn3DBlt:ivb/hsw */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, value);
intel_ring_advance(ring);
ring->fbc_dirty = false;
return 0;
}
static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
@ -336,6 +357,9 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
if (flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
return 0;
}
@ -429,6 +453,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->last_retired_head = -1;
}
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
out:
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_put(dev_priv);
@ -1486,7 +1512,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
/* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_request) {
ret = i915_add_request(ring, NULL, NULL);
ret = i915_add_request(ring, NULL);
if (ret)
return ret;
}
@ -1685,6 +1711,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
static int gen6_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
uint32_t cmd;
int ret;
@ -1707,6 +1734,10 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
if (IS_GEN7(dev) && flush)
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
return 0;
}


@ -37,8 +37,14 @@ struct intel_hw_status_page {
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
enum intel_ring_hangcheck_action { wait, active, kick, hung };
struct intel_ring_hangcheck {
bool deadlock;
u32 seqno;
u32 acthd;
int score;
enum intel_ring_hangcheck_action action;
};
struct intel_ring_buffer {
@ -138,6 +144,7 @@ struct intel_ring_buffer {
*/
u32 outstanding_lazy_request;
bool gpu_caches_dirty;
bool fbc_dirty;
wait_queue_head_t irq_queue;


@ -80,7 +80,7 @@ struct intel_sdvo {
/*
* Capabilities of the SDVO device returned by
* i830_sdvo_get_capabilities()
* intel_sdvo_get_capabilities()
*/
struct intel_sdvo_caps caps;
@ -1219,6 +1219,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
switch (intel_crtc->config.pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@ -1276,7 +1277,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(&connector->base);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
u16 active_outputs;
u16 active_outputs = 0;
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
@ -1292,7 +1293,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
u16 active_outputs;
u16 active_outputs = 0;
u32 tmp;
tmp = I915_READ(intel_sdvo->sdvo_reg);
@ -1312,28 +1313,69 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_sdvo_dtd dtd;
u32 flags = 0;
int encoder_pixel_multiplier = 0;
u32 flags = 0, sdvox;
u8 val;
bool ret;
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
/* Some sdvo encoders are not spec compliant and don't
* implement the mandatory get_timings function. */
DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
return;
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
}
if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
/*
* Pixel multiplier readout is tricky: Only on i915g/gm is it stored in
* the sdvo port register; on all other platforms it is part of the dpll
* state. Since the general pipe state readout happens before the
* encoder->get_config hook runs, we already have a valid pixel
* multiplier on all other platforms.
*/
if (IS_I915G(dev) || IS_I915GM(dev)) {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
pipe_config->pixel_multiplier =
((sdvox & SDVO_PORT_MULTIPLY_MASK)
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
/* Cross check the port pixel multiplier with the sdvo encoder state. */
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
switch (val) {
case SDVO_CLOCK_RATE_MULT_1X:
encoder_pixel_multiplier = 1;
break;
case SDVO_CLOCK_RATE_MULT_2X:
encoder_pixel_multiplier = 2;
break;
case SDVO_CLOCK_RATE_MULT_4X:
encoder_pixel_multiplier = 4;
break;
}
if (HAS_PCH_SPLIT(dev))
return; /* no pixel multiplier readout support yet */
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
}
static void intel_disable_sdvo(struct intel_encoder *encoder)
@ -2819,7 +2861,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
u32 hotplug_mask;
int i;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
if (!intel_sdvo)
@ -2848,18 +2889,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
}
}
hotplug_mask = 0;
if (IS_G4X(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
} else if (IS_GEN4(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
} else {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
intel_encoder->mode_set = intel_sdvo_mode_set;


@ -957,6 +957,14 @@ void intel_plane_restore(struct drm_plane *plane)
intel_plane->src_w, intel_plane->src_h);
}
void intel_plane_disable(struct drm_plane *plane)
{
if (!plane->crtc || !plane->fb)
return;
intel_disable_plane(plane);
}
static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,


@ -914,9 +914,6 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (!tv_mode)
return false;
if (intel_encoder_check_is_cloned(&intel_tv->base))
return false;
pipe_config->adjusted_mode.clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;

include/drm/i915_powerwell.h (new file, 36 lines)

@ -0,0 +1,36 @@
/**************************************************************************
*
* Copyright 2013 Intel Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
#ifndef _I915_POWERWELL_H_
#define _I915_POWERWELL_H_
/* For use by hda_i915 driver */
extern void i915_request_power_well(void);
extern void i915_release_power_well(void);
#endif /* _I915_POWERWELL_H_ */


@ -152,6 +152,16 @@ config SND_HDA_CODEC_HDMI
snd-hda-codec-hdmi.
This module is automatically loaded at probing.
config SND_HDA_I915
bool "Build Display HD-audio controller/codec power well support for i915 cards"
depends on DRM_I915
help
Say Y here to include full HDMI and DisplayPort HD-audio controller/codec
power-well support for Intel Haswell graphics cards based on the i915 driver.
Note that this option must be enabled for Intel Haswell C+ stepping machines; otherwise
the GPU display audio controller/codecs will not be initialized correctly, or may be
damaged on resume from S3.
config SND_HDA_CODEC_CIRRUS
bool "Build Cirrus Logic codec support"
default y

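With the new option selected, the relevant .config fragment on a Haswell box would look roughly like this (illustrative; SND_HDA_I915 is a bool and depends on DRM_I915):

CONFIG_DRM_I915=y
CONFIG_SND_HDA_INTEL=y
CONFIG_SND_HDA_I915=y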

@ -1,4 +1,6 @@
snd-hda-intel-objs := hda_intel.o
# for haswell power well
snd-hda-intel-$(CONFIG_SND_HDA_I915) += hda_i915.o
snd-hda-codec-y := hda_codec.o hda_jack.o hda_auto_parser.o
snd-hda-codec-$(CONFIG_SND_HDA_GENERIC) += hda_generic.o

sound/pci/hda/hda_i915.c (new file, 75 lines)

@ -0,0 +1,75 @@
/*
* hda_i915.c - routines for Haswell HDA controller power well support
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <sound/core.h>
#include <drm/i915_powerwell.h>
#include "hda_i915.h"
static void (*get_power)(void);
static void (*put_power)(void);
void hda_display_power(bool enable)
{
if (!get_power || !put_power)
return;
snd_printdd("HDA display power %s \n",
enable ? "Enable" : "Disable");
if (enable)
get_power();
else
put_power();
}
int hda_i915_init(void)
{
int err = 0;
get_power = symbol_request(i915_request_power_well);
if (!get_power) {
snd_printk(KERN_WARNING "hda-i915: failed to get i915 power well symbol\n");
return -ENODEV;
}
put_power = symbol_request(i915_release_power_well);
if (!put_power) {
symbol_put(i915_request_power_well);
get_power = NULL;
return -ENODEV;
}
snd_printd("HDA driver get symbol successfully from i915 module\n");
return err;
}
int hda_i915_exit(void)
{
if (get_power) {
symbol_put(i915_request_power_well);
get_power = NULL;
}
if (put_power) {
symbol_put(i915_release_power_well);
put_power = NULL;
}
return 0;
}

sound/pci/hda/hda_i915.h (new file, 35 lines)

@ -0,0 +1,35 @@
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __SOUND_HDA_I915_H
#define __SOUND_HDA_I915_H
#ifdef CONFIG_SND_HDA_I915
void hda_display_power(bool enable);
int hda_i915_init(void);
int hda_i915_exit(void);
#else
static inline void hda_display_power(bool enable) {}
static inline int hda_i915_init(void)
{
return -ENODEV;
}
static inline int hda_i915_exit(void)
{
return 0;
}
#endif
#endif


@ -62,6 +62,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/firmware.h>
#include "hda_codec.h"
#include "hda_i915.h"
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@ -541,6 +542,10 @@ struct azx {
/* for pending irqs */
struct work_struct irq_pending_work;
#ifdef CONFIG_SND_HDA_I915
struct work_struct probe_work;
#endif
/* reboot notifier (for mysterious hangup problem at power-down) */
struct notifier_block reboot_notifier;
@ -594,6 +599,7 @@ enum {
#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 power well support */
/* quirks for Intel PCH */
#define AZX_DCAPS_INTEL_PCH_NOPM \
@ -2900,6 +2906,8 @@ static int azx_suspend(struct device *dev)
pci_disable_device(pci);
pci_save_state(pci);
pci_set_power_state(pci, PCI_D3hot);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(false);
return 0;
}
@ -2912,6 +2920,8 @@ static int azx_resume(struct device *dev)
if (chip->disabled)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(true);
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
if (pci_enable_device(pci) < 0) {
@ -2944,6 +2954,8 @@ static int azx_runtime_suspend(struct device *dev)
azx_stop_chip(chip);
azx_clear_irq_pending(chip);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(false);
return 0;
}
@ -2952,6 +2964,8 @@ static int azx_runtime_resume(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(true);
azx_init_pci(chip);
azx_init_chip(chip, 1);
return 0;
@ -3006,7 +3020,6 @@ static void azx_notifier_unregister(struct azx *chip)
unregister_reboot_notifier(&chip->reboot_notifier);
}
static int azx_first_init(struct azx *chip);
static int azx_probe_continue(struct azx *chip);
#ifdef SUPPORT_VGA_SWITCHEROO
@ -3033,8 +3046,7 @@ static void azx_vs_set_state(struct pci_dev *pci,
snd_printk(KERN_INFO SFX
"%s: Start delayed initialization\n",
pci_name(chip->pci));
if (azx_first_init(chip) < 0 ||
azx_probe_continue(chip) < 0) {
if (azx_probe_continue(chip) < 0) {
snd_printk(KERN_ERR SFX
"%s: initialization error\n",
pci_name(chip->pci));
@ -3120,8 +3132,13 @@ static int register_vga_switcheroo(struct azx *chip)
*/
static int azx_free(struct azx *chip)
{
struct pci_dev *pci = chip->pci;
int i;
if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
&& chip->running)
pm_runtime_get_noresume(&pci->dev);
azx_del_card_list(chip);
azx_notifier_unregister(chip);
@ -3173,6 +3190,10 @@ static int azx_free(struct azx *chip)
if (chip->fw)
release_firmware(chip->fw);
#endif
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
hda_display_power(false);
hda_i915_exit();
}
kfree(chip);
return 0;
@ -3398,6 +3419,13 @@ static void azx_check_snoop_available(struct azx *chip)
}
}
#ifdef CONFIG_SND_HDA_I915
static void azx_probe_work(struct work_struct *work)
{
azx_probe_continue(container_of(work, struct azx, probe_work));
}
#endif
/*
* constructor
*/
@ -3473,7 +3501,13 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
return err;
}
#ifdef CONFIG_SND_HDA_I915
/* continue probing in work context as it may trigger request_module() */
INIT_WORK(&chip->probe_work, azx_probe_work);
#endif
*rchip = chip;
return 0;
}
@ -3730,11 +3764,6 @@ static int azx_probe(struct pci_dev *pci,
}
probe_now = !chip->disabled;
if (probe_now) {
err = azx_first_init(chip);
if (err < 0)
goto out_free;
}
#ifdef CONFIG_SND_HDA_PATCH_LOADER
if (patch[dev] && *patch[dev]) {
@ -3749,15 +3778,22 @@ static int azx_probe(struct pci_dev *pci,
}
#endif /* CONFIG_SND_HDA_PATCH_LOADER */
/* continue probing in work context, avoid request_module deadlock */
if (probe_now && (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)) {
#ifdef CONFIG_SND_HDA_I915
probe_now = false;
schedule_work(&chip->probe_work);
#else
snd_printk(KERN_ERR SFX "Haswell HDA requires CONFIG_SND_HDA_I915 to be built in\n");
#endif
}
if (probe_now) {
err = azx_probe_continue(chip);
if (err < 0)
goto out_free;
}
if (pci_dev_run_wake(pci))
pm_runtime_put_noidle(&pci->dev);
dev++;
complete_all(&chip->probe_wait);
return 0;
@ -3770,9 +3806,24 @@ out_free:
static int azx_probe_continue(struct azx *chip)
{
struct pci_dev *pci = chip->pci;
int dev = chip->dev_index;
int err;
/* Request power well for Haswell HDA controller and codec */
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
err = hda_i915_init();
if (err < 0) {
snd_printk(KERN_ERR SFX "Error requesting power well from i915\n");
goto out_free;
}
hda_display_power(true);
}
err = azx_first_init(chip);
if (err < 0)
goto out_free;
#ifdef CONFIG_SND_HDA_INPUT_BEEP
chip->beep_mode = beep_mode[dev];
#endif
@ -3817,6 +3868,8 @@ static int azx_probe_continue(struct azx *chip)
power_down_all_codecs(chip);
azx_notifier_register(chip);
azx_add_card_list(chip);
if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
pm_runtime_put_noidle(&pci->dev);
return 0;
@ -3829,9 +3882,6 @@ static void azx_remove(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
if (pci_dev_run_wake(pci))
pm_runtime_get_noresume(&pci->dev);
if (card)
snd_card_free(card);
pci_set_drvdata(pci, NULL);
@ -3864,11 +3914,14 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
AZX_DCAPS_I915_POWERWELL },
{ PCI_DEVICE(0x8086, 0x0c0c),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
AZX_DCAPS_I915_POWERWELL },
{ PCI_DEVICE(0x8086, 0x0d0c),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH |
AZX_DCAPS_I915_POWERWELL },
/* 5 Series/3400 */
{ PCI_DEVICE(0x8086, 0x3b56),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },