Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Misc radeon, nouveau, mgag200 and intel fixes.

  The intel fixes should contain the fix for the touchpad on the
  Chromebook - hey I'm an input maintainer now!"

Hate to pee on your parade, Dave, but I don't think being an input
maintainer is necessarily something to strive for...

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (25 commits)
  drm/tegra: drop "select DRM_HDMI"
  drm: Documentation typo fixes
  drm/mgag200: Bug fix: Renesas board now selects native resolution.
  drm/mgag200: Reject modes that are too big for VRAM
  drm/mgag200: 'fbdev_list' in 'struct mga_fbdev' is not used
  drm/radeon: don't check mipmap alignment if MIP_ADDRESS is FMASK
  drm/radeon: skip MC reset as it's probably not hung
  drm/radeon: add primary dac adj quirk for R200 board
  drm/radeon: don't set hpd, afmt interrupts when interrupts are disabled
  drm/i915: Turn off hsync and vsync on ADPA when disabling crt
  drm/i915: Fix incorrect definition of ADPA HSYNC and VSYNC bits
  drm/i915: also disable south interrupts when handling them
  drm/i915: enable irqs earlier when resuming
  drm/i915: Increase the RC6p threshold.
  DRM/i915: On G45 enable cursor plane briefly after enabling the display plane.
  drm/nv50-: prevent some races between modesetting and page flipping
  drm/nouveau/i2c: drop parent refcount when creating ports
  drm/nv84: fix regression in page flipping
  drm/nouveau: Fix typo in init_idx_addr_latched().
  drm/nouveau: Disable AGP on PowerPC again.
  ...
Linus Torvalds 2013-03-07 14:55:54 -08:00
commit af2841cdd4
26 changed files with 275 additions and 102 deletions


@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
-INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
+INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
-INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
-INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
+INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
-INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
-INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
+INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
-INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 intel_modeset_disable(dev);
 drm_irq_uninstall(dev);
+dev_priv->enable_hotplug_processing = false;
 }
 i915_save_state(dev);
@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
 error = i915_gem_init_hw(dev);
 mutex_unlock(&dev->struct_mutex);
+/* We need working interrupts for modeset enabling ... */
+drm_irq_install(dev);
 intel_modeset_init_hw(dev);
 intel_modeset_setup_hw_state(dev, false);
-drm_irq_install(dev);
+/*
+ * ... but also need to make sure that hotplug processing
+ * doesn't cause havoc. Like in the driver load code we don't
+ * bother with the tiny race here where we might lose hotplug
+ * notifications.
+ */
+intel_hpd_init(dev);
+dev_priv->enable_hotplug_processing = true;
 }
 intel_opregion_init(dev);


@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 {
 struct drm_device *dev = (struct drm_device *) arg;
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-u32 de_iir, gt_iir, de_ier, pm_iir;
+u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 irqreturn_t ret = IRQ_NONE;
 int i;
@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 de_ier = I915_READ(DEIER);
 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+/* Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+sde_ier = I915_READ(SDEIER);
+I915_WRITE(SDEIER, 0);
+POSTING_READ(SDEIER);
 gt_iir = I915_READ(GTIIR);
 if (gt_iir) {
 snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 I915_WRITE(DEIER, de_ier);
 POSTING_READ(DEIER);
+I915_WRITE(SDEIER, sde_ier);
+POSTING_READ(SDEIER);
 return ret;
 }
@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 struct drm_device *dev = (struct drm_device *) arg;
 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 int ret = IRQ_NONE;
-u32 de_iir, gt_iir, de_ier, pm_iir;
+u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 atomic_inc(&dev_priv->irq_received);
@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 POSTING_READ(DEIER);
+/* Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be
+ * able to process them after we restore SDEIER (as soon as we restore
+ * it, we'll get an interrupt if SDEIIR still has something to process
+ * due to its back queue). */
+sde_ier = I915_READ(SDEIER);
+I915_WRITE(SDEIER, 0);
+POSTING_READ(SDEIER);
 de_iir = I915_READ(DEIIR);
 gt_iir = I915_READ(GTIIR);
 pm_iir = I915_READ(GEN6_PMIIR);
@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 done:
 I915_WRITE(DEIER, de_ier);
 POSTING_READ(DEIER);
+I915_WRITE(SDEIER, sde_ier);
+POSTING_READ(SDEIER);
 return ret;
 }
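
Both handlers now apply the same bracketing around their body. Condensed into one place, the pattern looks roughly like this (an illustrative sketch built from the i915 register macros used above, not code from the commit; the actual acking of the individual IIR registers is elided):

static irqreturn_t irq_handler_sketch(int irq, void *arg)
{
	struct drm_device *dev = arg;
	drm_i915_private_t *dev_priv = dev->dev_private; /* I915_READ/WRITE expect this in scope */
	u32 de_ier, sde_ier;

	/* mask the master display irq and all south (PCH) interrupts */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);	/* make sure the disable lands before we ack anything */

	/* ... ack GTIIR/DEIIR/PMIIR here, reading and writing SDEIIR exactly once ... */

	/* restore; if SDEIIR queued anything meanwhile, this re-raises the irq */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);
	return IRQ_HANDLED;
}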


@@ -1613,9 +1613,9 @@
 #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 #define ADPA_USE_VGA_HVPOLARITY (1<<15)
 #define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
 #define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
 #define ADPA_HSYNC_CNTL_ENABLE 0
 #define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
 #define ADPA_VSYNC_ACTIVE_LOW 0


@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
 u32 temp;
 temp = I915_READ(crt->adpa_reg);
-temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
 temp &= ~ADPA_DAC_ENABLE;
 I915_WRITE(crt->adpa_reg, temp);
 }
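
Combined with the register fix above, disabling the CRT now reads as follows (a sketch of the net result of the two patches together, with the corrected bit definitions):

	temp = I915_READ(crt->adpa_reg);
	/* set the disable bits (11 = HSYNC, 10 = VSYNC) instead of clearing them */
	temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
	temp &= ~ADPA_DAC_ENABLE;
	I915_WRITE(crt->adpa_reg, temp);

The old code cleared those bits, explicitly keeping HSYNC/VSYNC running while the DAC went off, which is what kept some monitors from entering power saving.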


@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
 struct intel_dp *intel_dp = &intel_dig_port->dp;
 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
 enum port port = intel_dig_port->port;
-bool wait;
 uint32_t val;
+bool wait = false;
 if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
 val = I915_READ(DDI_BUF_CTL(port));

@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 */
 }
+/**
+ * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+u32 cntl = I915_READ(CURCNTR(pipe));
+if ((cntl & CURSOR_MODE) == 0) {
+u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+intel_wait_for_vblank(dev_priv->dev, pipe);
+I915_WRITE(CURCNTR(pipe), cntl);
+I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+}
+}
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
 struct drm_device *dev = crtc->dev;
@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 intel_enable_pipe(dev_priv, pipe, false);
 intel_enable_plane(dev_priv, plane, pipe);
+if (IS_G4X(dev))
+g4x_fixup_plane(dev_priv, pipe);
 intel_crtc_load_lut(crtc);
 intel_update_fbc(dev);
@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 {
 struct drm_device *dev = crtc->dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
-struct intel_framebuffer *intel_fb;
-struct drm_i915_gem_object *obj;
+struct drm_framebuffer *old_fb = crtc->fb;
+struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 struct intel_unpin_work *work;
 unsigned long flags;
@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 work->event = event;
 work->crtc = crtc;
-intel_fb = to_intel_framebuffer(crtc->fb);
-work->old_fb_obj = intel_fb->obj;
+work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
 INIT_WORK(&work->work, intel_unpin_work_fn);
 ret = drm_vblank_get(dev, intel_crtc->pipe);
@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 intel_crtc->unpin_work = work;
 spin_unlock_irqrestore(&dev->event_lock, flags);
-intel_fb = to_intel_framebuffer(fb);
-obj = intel_fb->obj;
 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
 flush_workqueue(dev_priv->wq);
@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_pending:
 atomic_dec(&intel_crtc->unpin_work_count);
+crtc->fb = old_fb;
 drm_gem_object_unreference(&work->old_fb_obj->base);
 drm_gem_object_unreference(&obj->base);
 mutex_unlock(&dev->struct_mutex);


@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
 if (has_aux_irq)
-done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+msecs_to_jiffies(10));
 else
 done = wait_for_atomic(C, 10) == 0;
 if (!done)
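
The bug here is a units mix-up: wait_event_timeout() takes its timeout in jiffies, so a bare 10 means 10 scheduler ticks -- 10 ms at HZ=1000, but a full 100 ms at HZ=100. msecs_to_jiffies(10) pins the timeout at the intended 10 ms regardless of the kernel's HZ setting.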


@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 I915_WRITE(GEN6_RC_SLEEP, 0);
 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
-I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 /* Check if we are enabling RC6 */


@@ -112,7 +112,6 @@ struct mga_framebuffer {
 struct mga_fbdev {
 struct drm_fb_helper helper;
 struct mga_framebuffer mfb;
-struct list_head fbdev_list;
 void *sysram;
 int size;
 struct ttm_bo_kmap_obj mapping;


@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
 int ret;
 int data, clock;
+WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
 WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
 WREG_DAC(MGA1064_GEN_IO_CTL, 0);


@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
 static int mga_vga_mode_valid(struct drm_connector *connector,
 struct drm_display_mode *mode)
 {
+struct drm_device *dev = connector->dev;
+struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+struct mga_fbdev *mfbdev = mdev->mfbdev;
+struct drm_fb_helper *fb_helper = &mfbdev->helper;
+struct drm_fb_helper_connector *fb_helper_conn = NULL;
+int bpp = 32;
+int i = 0;
 /* FIXME: Add bandwidth and g200se limitations */
 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 return MODE_BAD;
 }
+/* Validate the mode input by the user */
+for (i = 0; i < fb_helper->connector_count; i++) {
+if (fb_helper->connector_info[i]->connector == connector) {
+/* Found the helper for this connector */
+fb_helper_conn = fb_helper->connector_info[i];
+if (fb_helper_conn->cmdline_mode.specified) {
+if (fb_helper_conn->cmdline_mode.bpp_specified) {
+bpp = fb_helper_conn->cmdline_mode.bpp;
+}
+}
+}
+}
+if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+if (fb_helper_conn)
+fb_helper_conn->cmdline_mode.specified = false;
+return MODE_BAD;
+}
 return MODE_OK;
 }
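
Worked example of the new check: at the default 32 bpp, a 1920x1200 mode needs 1920 * 1200 * 4 = 9,216,000 bytes of VRAM, so on a board with 8 MiB (8,388,608 bytes) it is rejected, and any cmdline-forced mode is un-flagged so the fbdev helper can fall back to a mode that fits.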


@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
 }
-nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
 }


@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
 init->offset += 2;
 init_wr32(init, dreg, idata);
-init_mask(init, creg, ~mask, data | idata);
+init_mask(init, creg, ~mask, data | iaddr);
 }
 }


@@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
 /* drop port's i2c subdev refcount, i2c handles this itself */
 if (ret == 0) {
 list_add_tail(&port->head, &i2c->ports);
+atomic_dec(&parent->refcount);
 atomic_dec(&engine->refcount);
 }


@@ -47,6 +47,18 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
 if (drm->agp.stat == UNKNOWN) {
 if (!nouveau_agpmode)
 return false;
+#ifdef __powerpc__
+/* Disable AGP by default on all PowerPC machines for
+ * now -- At least some UniNorth-2 AGP bridges are
+ * known to be broken: DMA from the host to the card
+ * works just fine, but writeback from the card to the
+ * host goes straight to memory untranslated bypassing
+ * the GATT somehow, making them quite painful to deal
+ * with...
+ */
+if (nouveau_agpmode == -1)
+return false;
+#endif
 return true;
 }


@@ -55,9 +55,9 @@
 /* offsets in shared sync bo of various structures */
 #define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
-#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
 #define EVO_CORE_HANDLE (0xd1500000)
 #define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
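
Since EVO_SYNC(c, o) expands to (c) * 0x0100 + (o), the "+ 1" shifts every CRTC's flip semaphores one slot up: EVO_FLIP_SEM0(0) now evaluates to 0x100 and EVO_FLIP_SEM1(0) to 0x110 instead of 0x000 and 0x010, so CRTC 0's semaphores no longer collide with EVO_MAST_NTFY at offset 0.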
@@ -341,10 +341,8 @@ struct nv50_curs {
 struct nv50_sync {
 struct nv50_dmac base;
-struct {
-u32 offset;
-u16 value;
-} sem;
+u32 addr;
+u32 data;
 };
 struct nv50_ovly {
@@ -471,13 +469,33 @@ nv50_display_crtc_sema(struct drm_device *dev, int crtc)
 return nv50_disp(dev)->sync;
 }
+struct nv50_display_flip {
+struct nv50_disp *disp;
+struct nv50_sync *chan;
+};
+static bool
+nv50_display_flip_wait(void *data)
+{
+struct nv50_display_flip *flip = data;
+if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
+flip->chan->data)
+return true;
+usleep_range(1, 2);
+return false;
+}
 void
 nv50_display_flip_stop(struct drm_crtc *crtc)
 {
-struct nv50_sync *sync = nv50_sync(crtc);
+struct nouveau_device *device = nouveau_dev(crtc->dev);
+struct nv50_display_flip flip = {
+.disp = nv50_disp(crtc->dev),
+.chan = nv50_sync(crtc),
+};
 u32 *push;
-push = evo_wait(sync, 8);
+push = evo_wait(flip.chan, 8);
 if (push) {
 evo_mthd(push, 0x0084, 1);
 evo_data(push, 0x00000000);
@@ -487,8 +505,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
 evo_data(push, 0x00000000);
 evo_mthd(push, 0x0080, 1);
 evo_data(push, 0x00000000);
-evo_kick(push, sync);
+evo_kick(push, flip.chan);
 }
+nv_wait_cb(device, nv50_display_flip_wait, &flip);
 }
int
@ -496,11 +516,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_channel *chan, u32 swap_interval)
{
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_sync *sync = nv50_sync(crtc);
int head = nv_crtc->index, ret;
u32 *push;
int ret;
swap_interval <<= 4;
if (swap_interval == 0)
@@ -510,58 +529,64 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 if (unlikely(push == NULL))
 return -EBUSY;
 /* synchronise with the rendering channel, if necessary */
-if (likely(chan)) {
+if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
+ret = RING_SPACE(chan, 8);
+if (ret)
+return ret;
+BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
+OUT_RING (chan, NvEvoSema0 + head);
+OUT_RING (chan, sync->addr ^ 0x10);
+BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+OUT_RING (chan, sync->data + 1);
+BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+OUT_RING (chan, sync->addr);
+OUT_RING (chan, sync->data);
+} else
+if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+ret = RING_SPACE(chan, 12);
+if (ret)
+return ret;
+BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+OUT_RING (chan, chan->vram);
+BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+OUT_RING (chan, upper_32_bits(addr ^ 0x10));
+OUT_RING (chan, lower_32_bits(addr ^ 0x10));
+OUT_RING (chan, sync->data + 1);
+OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+OUT_RING (chan, upper_32_bits(addr));
+OUT_RING (chan, lower_32_bits(addr));
+OUT_RING (chan, sync->data);
+OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
+} else
+if (chan) {
+u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
 ret = RING_SPACE(chan, 10);
 if (ret)
 return ret;
-if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
-BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
-OUT_RING (chan, sync->sem.offset);
-BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
-OUT_RING (chan, 0xf00d0000 | sync->sem.value);
-BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
-OUT_RING (chan, sync->sem.offset ^ 0x10);
-OUT_RING (chan, 0x74b1e000);
-BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-OUT_RING (chan, NvSema);
-} else
-if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
-u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
-offset += sync->sem.offset;
-BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-OUT_RING (chan, upper_32_bits(offset));
-OUT_RING (chan, lower_32_bits(offset));
-OUT_RING (chan, 0xf00d0000 | sync->sem.value);
-OUT_RING (chan, 0x00000002);
-BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-OUT_RING (chan, upper_32_bits(offset));
-OUT_RING (chan, lower_32_bits(offset ^ 0x10));
-OUT_RING (chan, 0x74b1e000);
-OUT_RING (chan, 0x00000001);
-} else {
-u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
-offset += sync->sem.offset;
-BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-OUT_RING (chan, upper_32_bits(offset));
-OUT_RING (chan, lower_32_bits(offset));
-OUT_RING (chan, 0xf00d0000 | sync->sem.value);
-OUT_RING (chan, 0x00001002);
-BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-OUT_RING (chan, upper_32_bits(offset));
-OUT_RING (chan, lower_32_bits(offset ^ 0x10));
-OUT_RING (chan, 0x74b1e000);
-OUT_RING (chan, 0x00001001);
-}
+BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+OUT_RING (chan, upper_32_bits(addr ^ 0x10));
+OUT_RING (chan, lower_32_bits(addr ^ 0x10));
+OUT_RING (chan, sync->data + 1);
+OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
+NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+OUT_RING (chan, upper_32_bits(addr));
+OUT_RING (chan, lower_32_bits(addr));
+OUT_RING (chan, sync->data);
+OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
+NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
 }
+if (chan) {
+sync->addr ^= 0x10;
+sync->data++;
 FIRE_RING (chan);
-} else {
-nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
-0xf00d0000 | sync->sem.value);
-evo_sync(crtc->dev);
 }
@@ -575,9 +600,9 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 evo_data(push, 0x40000000);
 }
 evo_mthd(push, 0x0088, 4);
-evo_data(push, sync->sem.offset);
-evo_data(push, 0xf00d0000 | sync->sem.value);
-evo_data(push, 0x74b1e000);
+evo_data(push, sync->addr);
+evo_data(push, sync->data++);
+evo_data(push, sync->data);
 evo_data(push, NvEvoSync);
 evo_mthd(push, 0x00a0, 2);
 evo_data(push, 0x00000000);
@@ -605,9 +630,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 evo_mthd(push, 0x0080, 1);
 evo_data(push, 0x00000000);
 evo_kick(push, sync);
-sync->sem.offset ^= 0x10;
-sync->sem.value++;
 return 0;
 }
@@ -1379,7 +1401,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
 if (ret)
 goto out;
-head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+head->sync.addr = EVO_FLIP_SEM0(index);
+head->sync.data = 0x00000000;
 /* allocate overlay resources */
 ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
@@ -2112,15 +2135,23 @@ nv50_display_fini(struct drm_device *dev)
 int
 nv50_display_init(struct drm_device *dev)
 {
-u32 *push = evo_wait(nv50_mast(dev), 32);
-if (push) {
-evo_mthd(push, 0x0088, 1);
-evo_data(push, NvEvoSync);
-evo_kick(push, nv50_mast(dev));
-return 0;
+struct nv50_disp *disp = nv50_disp(dev);
+struct drm_crtc *crtc;
+u32 *push;
+push = evo_wait(nv50_mast(dev), 32);
+if (!push)
+return -EBUSY;
+list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+struct nv50_sync *sync = nv50_sync(crtc);
+nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
 }
-return -EBUSY;
+evo_mthd(push, 0x0088, 1);
+evo_data(push, NvEvoSync);
+evo_kick(push, nv50_mast(dev));
+return 0;
 }
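
Reading the nv50 changes as a whole (an interpretation of the hunks above, not wording from the commit): each CRTC now owns a pair of semaphore slots 0x10 apart in the shared sync buffer, with sync->addr toggling between them and sync->data counting up; nv50_display_flip_stop() really waits for the pending flip via nv_wait_cb(); and nv50_display_init() seeds the current value for every CRTC so a resume cannot stall on a stale semaphore. The hardcoded 0xf00d0000/0x74b1e000 magic values are gone along with the sem.offset/sem.value pair.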
void


@@ -2438,6 +2438,12 @@ static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
 if (tmp & L2_BUSY)
 reset_mask |= RADEON_RESET_VMC;
+/* Skip MC reset as it's most likely not hung, just busy */
+if (reset_mask & RADEON_RESET_MC) {
+DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+reset_mask &= ~RADEON_RESET_MC;
+}
 return reset_mask;
 }


@@ -834,7 +834,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
 __func__, __LINE__, toffset, surf.base_align);
 return -EINVAL;
 }
-if (moffset & (surf.base_align - 1)) {
+if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
 dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
 __func__, __LINE__, moffset, surf.base_align);
 return -EINVAL;


@@ -1381,6 +1381,12 @@ static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
 if (tmp & L2_BUSY)
 reset_mask |= RADEON_RESET_VMC;
+/* Skip MC reset as it's most likely not hung, just busy */
+if (reset_mask & RADEON_RESET_MC) {
+DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+reset_mask &= ~RADEON_RESET_MC;
+}
 return reset_mask;
 }


@@ -1394,6 +1394,12 @@ static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
 if (r600_is_display_hung(rdev))
 reset_mask |= RADEON_RESET_DISPLAY;
+/* Skip MC reset as it's most likely not hung, just busy */
+if (reset_mask & RADEON_RESET_MC) {
+DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+reset_mask &= ~RADEON_RESET_MC;
+}
 return reset_mask;
 }


@@ -970,6 +970,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
 found = 1;
 }
+/* quirks */
+/* Radeon 9100 (R200) */
+if ((dev->pdev->device == 0x514D) &&
+(dev->pdev->subsystem_vendor == 0x174B) &&
+(dev->pdev->subsystem_device == 0x7149)) {
+/* vbios value is bad, use the default */
+found = 0;
+}
 if (!found) /* fallback to defaults */
 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);


@@ -70,9 +70,10 @@
 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
 * 2.28.0 - r600-eg: Add MEM_WRITE packet support
 * 2.29.0 - R500 FP16 color clear registers
+ * 2.30.0 - fix for FMASK texturing
 */
 #define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 29
+#define KMS_DRIVER_MINOR 30
 #define KMS_DRIVER_PATCHLEVEL 0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);


@@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
 {
 unsigned long irqflags;
+if (!rdev->ddev->irq_enabled)
+return;
 spin_lock_irqsave(&rdev->irq.lock, irqflags);
 rdev->irq.afmt[block] = true;
 radeon_irq_set(rdev);
@@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
 {
 unsigned long irqflags;
+if (!rdev->ddev->irq_enabled)
+return;
 spin_lock_irqsave(&rdev->irq.lock, irqflags);
 rdev->irq.afmt[block] = false;
 radeon_irq_set(rdev);
@@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
 unsigned long irqflags;
 int i;
+if (!rdev->ddev->irq_enabled)
+return;
 spin_lock_irqsave(&rdev->irq.lock, irqflags);
 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
@@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
 unsigned long irqflags;
 int i;
+if (!rdev->ddev->irq_enabled)
+return;
 spin_lock_irqsave(&rdev->irq.lock, irqflags);
 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));


@@ -2284,6 +2284,12 @@ static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
 if (tmp & L2_BUSY)
 reset_mask |= RADEON_RESET_VMC;
+/* Skip MC reset as it's most likely not hung, just busy */
+if (reset_mask & RADEON_RESET_MC) {
+DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+reset_mask &= ~RADEON_RESET_MC;
+}
 return reset_mask;
 }


@@ -4,7 +4,6 @@ config DRM_TEGRA
 select DRM_KMS_HELPER
 select DRM_GEM_CMA_HELPER
 select DRM_KMS_CMA_HELPER
-select DRM_HDMI
 select FB_CFB_FILLRECT
 select FB_CFB_COPYAREA
 select FB_CFB_IMAGEBLIT


@@ -443,12 +443,12 @@ struct drm_crtc {
 * @dpms: set power state (see drm_crtc_funcs above)
 * @save: save connector state
 * @restore: restore connector state
-* @reset: reset connector after state has been invalidate (e.g. resume)
+* @reset: reset connector after state has been invalidated (e.g. resume)
 * @detect: is this connector active?
 * @fill_modes: fill mode list for this connector
-* @set_property: property for this connector may need update
+* @set_property: property for this connector may need an update
 * @destroy: make object go away
-* @force: notify the driver the connector is forced on
+* @force: notify the driver that the connector is forced on
 *
 * Each CRTC may have one or more connectors attached to it. The functions
 * below allow the core DRM code to control connectors, enumerate available modes,