Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (41 commits)
  drm/radeon/kms: make sure display hw is disabled when suspending
  drm/vmwgfx: Allow userspace to change default layout. Bump minor.
  drm/vmwgfx: Fix framebuffer modesetting
  drm/vmwgfx: Fix vga save / restore with display topology.
  vgaarb: use MIT license
  vgaarb: convert pr_devel() to pr_debug()
  drm: fix typos in Linux DRM Developer's Guide
  drm/radeon/kms/pm: voltage fixes
  drm/radeon/kms/pm: radeon_set_power_state fixes
  drm/radeon/kms/pm: patch default power state with default clocks/voltages on r6xx+
  drm/radeon/kms/pm: enable SetVoltage on r7xx/evergreen
  drm/radeon/kms/pm: add support for SetVoltage cmd table (V2)
  drm/radeon/kms/evergreen: add initial CS parser
  drm/kms: disable/enable poll around switcheroo on/off
  drm/nouveau: fixup confusion over which handle the DSM is hanging off.
  drm/nouveau: attempt to get bios from ACPI v3
  drm/nv50: cast IGP memory location to u64 before shifting
  drm/ttm: Fix ttm_page_alloc.c
  drm/ttm: Fix cached TTM page allocation.
  drm/vmwgfx: Remove some leftover debug messages.
  ...
Linus Torvalds 2010-06-03 07:19:45 -07:00
commit 1067b6c2be
48 changed files with 3540 additions and 323 deletions


@ -389,7 +389,7 @@
</para>
<para>
If your driver supports memory management (it should!), you'll
need to set that up at load time as well. How you intialize
need to set that up at load time as well. How you initialize
it depends on which memory manager you're using, TTM or GEM.
</para>
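As an illustration of the load-time hook described above, the memory manager is usually brought up from the driver's load callback. The following is a minimal sketch only; foo_device, foo_ttm_init() and foo_gem_init() are hypothetical placeholders, not functions from any real driver:

/* Sketch: pick and initialize one memory manager at driver load time. */
static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct foo_device *fdev;
	int ret;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;
	dev->dev_private = fdev;

	/* Hypothetical helper: would wrap TTM setup (global objects plus
	 * ttm_bo_device_init()) or, alternatively, GEM object bookkeeping. */
	ret = foo_ttm_init(fdev);	/* or foo_gem_init(fdev) */
	if (ret)
		kfree(fdev);
	return ret;
}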
<sect3>
@ -399,7 +399,7 @@
aperture space for graphics devices. TTM supports both UMA devices
and devices with dedicated video RAM (VRAM), i.e. most discrete
graphics devices. If your device has dedicated RAM, supporting
TTM is desireable. TTM also integrates tightly with your
TTM is desirable. TTM also integrates tightly with your
driver specific buffer execution function. See the radeon
driver for examples.
</para>
@ -443,7 +443,7 @@
likely eventually calling ttm_bo_global_init and
ttm_bo_global_release, respectively. Also like the previous
object, ttm_global_item_ref is used to create an initial reference
count for the TTM, which will call your initalization function.
count for the TTM, which will call your initialization function.
</para>
</sect3>
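To make the global-object reference counting above concrete, here is a sketch modelled loosely on the radeon driver of this period; the struct and field names are quoted from memory and should be treated as approximate rather than authoritative:

/* Sketch: take a reference on the shared TTM buffer-object global state. */
static int foo_ttm_global_init(struct foo_device *fdev)
{
	struct ttm_global_reference *global_ref = &fdev->bo_global_ref;

	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;		/* called on first reference */
	global_ref->release = &ttm_bo_global_release;	/* called on last unref */
	return ttm_global_item_ref(global_ref);
}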
<sect3>
@ -557,7 +557,7 @@ void intel_crt_init(struct drm_device *dev)
CRT connector and encoder combination is created. A device
specific i2c bus is also created, for fetching EDID data and
performing monitor detection. Once the process is complete,
the new connector is regsitered with sysfs, to make its
the new connector is registered with sysfs, to make its
properties available to applications.
</para>
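The create-and-register sequence described above boils down to a handful of core DRM calls. A hedged outline follows (a fragment; dev, connector and encoder are assumed to exist, the foo_* function tables are placeholders, and the real intel_crt_init() does more work than this):

/* Sketch: minimal connector + encoder setup, finishing with sysfs exposure. */
drm_connector_init(dev, connector, &foo_connector_funcs,
		   DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, encoder, &foo_encoder_funcs, DRM_MODE_ENCODER_DAC);
drm_mode_connector_attach_encoder(connector, encoder);

/* Attach the helper vtables used by the mode-setting helper core. */
drm_connector_helper_add(connector, &foo_connector_helper_funcs);
drm_encoder_helper_add(encoder, &foo_encoder_helper_funcs);

/* Register with sysfs so userspace can read the connector's properties. */
drm_sysfs_connector_add(connector);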
<sect4>
@ -581,12 +581,12 @@ void intel_crt_init(struct drm_device *dev)
<para>
For each encoder, CRTC and connector, several functions must
be provided, depending on the object type. Encoder objects
need should provide a DPMS (basically on/off) function, mode fixup
need to provide a DPMS (basically on/off) function, mode fixup
(for converting requested modes into native hardware timings),
and prepare, set and commit functions for use by the core DRM
helper functions. Connector helpers need to provide mode fetch and
validity functions as well as an encoder matching function for
returing an ideal encoder for a given connector. The core
returning an ideal encoder for a given connector. The core
connector functions include a DPMS callback, (deprecated)
save/restore routines, detection, mode probing, property handling,
and cleanup functions.
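Concretely, the per-object helper vtables listed above look roughly like the following; the callback fields match the drm_crtc_helper interfaces of this period, while the foo_* implementations are placeholders:

/* Sketch: encoder and connector helper vtables wired to driver callbacks. */
static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
	.dpms = foo_encoder_dpms,		/* basically on/off */
	.mode_fixup = foo_encoder_mode_fixup,	/* requested mode -> hw timings */
	.prepare = foo_encoder_prepare,
	.mode_set = foo_encoder_mode_set,
	.commit = foo_encoder_commit,
};

static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
	.get_modes = foo_connector_get_modes,		/* mode fetch */
	.mode_valid = foo_connector_mode_valid,		/* mode validity */
	.best_encoder = foo_connector_best_encoder,	/* ideal encoder */
};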


@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work)
}
}
void drm_kms_helper_poll_init(struct drm_device *dev)
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
struct drm_connector *connector;
bool poll = false;
struct drm_connector *connector;
int ret;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
}
slow_work_register_user(THIS_MODULE);
delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
&output_poll_ops);
if (poll) {
ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
DRM_ERROR("delayed enqueue failed %d\n", ret);
}
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
void drm_kms_helper_poll_init(struct drm_device *dev)
{
slow_work_register_user(THIS_MODULE);
delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
&output_poll_ops);
dev->mode_config.poll_enabled = true;
drm_kms_helper_poll_enable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
drm_kms_helper_poll_disable(dev);
slow_work_unregister_user(THIS_MODULE);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);


@ -1320,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_INFO "i915: switched off\n");
printk(KERN_INFO "i915: switched on\n");
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "i915: switched off\n");
drm_kms_helper_poll_disable(dev);
i915_suspend(dev, pmm);
}
}


@ -34,7 +34,7 @@
static struct nouveau_dsm_priv {
bool dsm_detected;
acpi_handle dhandle;
acpi_handle dsm_handle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
static const char nouveau_dsm_muid[] = {
@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
else
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
}
static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
if (id == VGA_SWITCHEROO_IGD)
return 0;
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
static int nouveau_dsm_init(void)
@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
if (ACPI_FAILURE(status)) {
return false;
}
ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
if (ret < 0)
return false;
nouveau_dsm_priv.dhandle = dhandle;
nouveau_dsm_priv.dsm_handle = nvidia_handle;
return true;
}
@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void)
struct pci_dev *pdev = NULL;
int has_dsm = 0;
int vga_count = 0;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void)
}
if (vga_count == 2 && has_dsm) {
acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void)
{
vga_switcheroo_unregister_handler();
}
/* retrieve the ROM in 4k blocks */
static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
int offset, int len)
{
acpi_status status;
union acpi_object rom_arg_elements[2], *obj;
struct acpi_object_list rom_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
rom_arg.count = 2;
rom_arg.pointer = &rom_arg_elements[0];
rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
rom_arg_elements[0].integer.value = offset;
rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
rom_arg_elements[1].integer.value = len;
status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
obj = (union acpi_object *)buffer.pointer;
memcpy(bios+offset, obj->buffer.pointer, len);
kfree(buffer.pointer);
return len;
}
bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
{
acpi_status status;
acpi_handle dhandle, rom_handle;
if (!nouveau_dsm_priv.dsm_detected)
return false;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
if (ACPI_FAILURE(status))
return false;
nouveau_dsm_priv.rom_handle = rom_handle;
return true;
}
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
{
return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
}


@ -178,6 +178,25 @@ out:
pci_disable_rom(dev->pdev);
}
static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
{
int i;
int ret;
int size = 64 * 1024;
if (!nouveau_acpi_rom_supported(dev->pdev))
return;
for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
ret = nouveau_acpi_get_bios_chunk(data,
(i * ROM_BIOS_PAGE),
ROM_BIOS_PAGE);
if (ret <= 0)
break;
}
return;
}
struct methods {
const char desc[8];
void (*loadbios)(struct drm_device *, uint8_t *);
@ -191,6 +210,7 @@ static struct methods nv04_methods[] = {
};
static struct methods nv50_methods[] = {
{ "ACPI", load_vbios_acpi, true },
{ "PRAMIN", load_vbios_pramin, true },
{ "PROM", load_vbios_prom, false },
{ "PCIROM", load_vbios_pci, true },
@ -2807,7 +2827,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
offset, gpio->tag, gpio->state_default);
if (bios->execute)
nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
/* The NVIDIA binary driver doesn't appear to actually do
* any of this, my VBIOS does however.
@ -5533,12 +5556,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
/*
* Normal entries consist of a single bit, but dual link has the
* next most significant bit set too
*/
entry->duallink_possible =
((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
switch (entry->type) {
case OUTPUT_ANALOG:
@ -5622,6 +5639,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
break;
}
if (dcb->version < 0x40) {
/* Normal entries consist of a single bit, but dual link has
* the next most significant bit set too
*/
entry->duallink_possible =
((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
} else {
entry->duallink_possible = (entry->sorconf.link == 3);
}
/* unsure what DCB version introduces this, 3.0? */
if (conf & 0x100000)
entry->i2c_upper_default = true;
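The pre-DCB-4.0 dual-link test moved above relies on a small bit trick: a normal entry has exactly one OR bit set, while a dual-link-capable entry also has the next more significant bit set, so the value equals three times its lowest set bit. A stand-alone worked example of that check (plain C, outside the driver; like the driver code it assumes a non-zero mask):

#include <stdio.h>
#include <strings.h>	/* ffs() */

static int duallink_possible(unsigned int or_mask)
{
	/* lowest set bit * 3 == mask  <=>  mask is one bit plus the next bit up */
	return (1 << (ffs(or_mask) - 1)) * 3 == or_mask;
}

int main(void)
{
	printf("%d\n", duallink_possible(0x1));	/* 0: single bit only   */
	printf("%d\n", duallink_possible(0x3));	/* 1: bits 0 and 1 set  */
	printf("%d\n", duallink_possible(0x6));	/* 1: bits 1 and 2 set  */
	printf("%d\n", duallink_possible(0x5));	/* 0: bits not adjacent */
	return 0;
}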
@ -6205,6 +6232,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
nouveau_i2c_fini(dev, entry);
}
static bool
nouveau_bios_posted(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
bool was_locked;
unsigned htotal;
if (dev_priv->chipset >= NV_50) {
if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
NVReadVgaCrtc(dev, 0, 0x1a) == 0)
return false;
return true;
}
was_locked = NVLockVgaCrtcs(dev, false);
htotal = NVReadVgaCrtc(dev, 0, 0x06);
htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
NVLockVgaCrtcs(dev, was_locked);
return (htotal != 0);
}
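nouveau_bios_posted() above decides whether the VBIOS has already run by reassembling the CRTC horizontal-total value from its scattered register bits; on a card that has not been POSTed those registers still read zero. A stand-alone illustration of the same bit assembly, with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up CRTC register reads; a POSTed card has non-zero timings. */
	uint8_t cr06 = 0x3f, cr07 = 0x21, cr25 = 0x00, cr41 = 0x00;
	unsigned int htotal;

	htotal  = cr06;			/* HTOTAL[7:0]  */
	htotal |= (cr07 & 0x01) << 8;	/* HTOTAL[8]    */
	htotal |= (cr07 & 0x20) << 4;	/* HTOTAL[9]    */
	htotal |= (cr25 & 0x01) << 10;	/* HTOTAL[10]   */
	htotal |= (cr41 & 0x01) << 11;	/* HTOTAL[11]   */

	printf("htotal = %u -> %s\n", htotal,
	       htotal ? "POSTed" : "not POSTed");
	return 0;
}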
int
nouveau_bios_init(struct drm_device *dev)
{
@ -6239,11 +6290,9 @@ nouveau_bios_init(struct drm_device *dev)
bios->execute = false;
/* ... unless card isn't POSTed already */
if (dev_priv->card_type >= NV_10 &&
NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
if (!nouveau_bios_posted(dev)) {
NV_INFO(dev, "Adaptor not initialised\n");
if (dev_priv->card_type < NV_50) {
if (dev_priv->card_type < NV_40) {
NV_ERROR(dev, "Unable to POST this chipset\n");
return -ENODEV;
}


@ -432,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
static struct drm_display_mode *
nouveau_connector_native_mode(struct nouveau_connector *connector)
nouveau_connector_native_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_connector_helper_funcs *helper = connector->helper_private;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *largest = NULL;
int high_w = 0, high_h = 0, high_v = 0;
/* Use preferred mode if there is one.. */
list_for_each_entry(mode, &connector->base.probed_modes, head) {
list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
if (helper->mode_valid(connector, mode) != MODE_OK)
continue;
/* Use preferred mode if there is one.. */
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
NV_DEBUG_KMS(dev, "native mode from preferred\n");
return drm_mode_duplicate(dev, mode);
}
}
/* Otherwise, take the resolution with the largest width, then height,
* then vertical refresh
*/
list_for_each_entry(mode, &connector->base.probed_modes, head) {
/* Otherwise, take the resolution with the largest width, then
* height, then vertical refresh
*/
if (mode->hdisplay < high_w)
continue;
@ -553,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
*/
if (!nv_connector->native_mode)
nv_connector->native_mode =
nouveau_connector_native_mode(nv_connector);
nouveau_connector_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
@ -584,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
switch (nv_encoder->dcb->type) {
case OUTPUT_LVDS:
BUG_ON(!nv_connector->native_mode);
if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
mode->vdisplay > nv_connector->native_mode->vdisplay)
if (nv_connector->native_mode &&
(mode->hdisplay > nv_connector->native_mode->hdisplay ||
mode->vdisplay > nv_connector->native_mode->vdisplay))
return MODE_PANEL;
min_clock = 0;
@ -594,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
break;
case OUTPUT_TMDS:
if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
(dev_priv->card_type < NV_50 &&
!nv_encoder->dcb->duallink_possible))
!nv_encoder->dcb->duallink_possible)
max_clock = 165000;
else
max_clock = 330000;
@ -729,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
if (ret == 0)
goto out;
nv_connector->detected_encoder = nv_encoder;
nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
nv_connector->native_mode = nouveau_connector_native_mode(connector);
list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
drm_mode_remove(connector, mode);


@ -40,6 +40,8 @@ struct nouveau_crtc {
int sharpness;
int last_dpms;
int cursor_saved_x, cursor_saved_y;
struct {
int cpp;
bool blanked;


@ -175,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
nouveau_bo_unpin(nouveau_fb->nvbo);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
}
NV_INFO(dev, "Evicting buffers...\n");
ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
@ -314,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev)
nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
NV_ERROR(dev, "Could not pin/map cursor.\n");
}
if (dev_priv->card_type < NV_50) {
nv04_display_restore(dev);
NVLockVgaCrtcs(dev, false);
} else
nv50_display_init(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nv_crtc->cursor.set_offset(nv_crtc,
nv_crtc->cursor.nvbo->bo.offset -
dev_priv->vm_vram_base);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
nv_crtc->cursor_saved_y);
}
/* Force CLUT to get re-loaded during modeset */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);


@ -851,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
/* nouveau_acpi.c */
#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
#endif
/* nouveau_backlight.c */


@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev)
dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
dev_priv->vram_sys_base <<= 12;
}
NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
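The point of splitting the assignment above is operand width: nv_rd32() returns a 32-bit value, so shifting it left by 12 before it reaches the 64-bit vram_sys_base field silently drops the top 12 bits. Assigning first and shifting the u64 afterwards keeps them. A tiny stand-alone illustration of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0x00345678;	/* pretend value read from 0x100e10 */
	uint64_t truncated, preserved;

	truncated = reg << 12;	/* shift done in 32 bits: high bits lost    */
	preserved = reg;
	preserved <<= 12;	/* shift done in 64 bits: value kept intact */

	printf("truncated = 0x%llx\n", (unsigned long long)truncated);
	printf("preserved = 0x%llx\n", (unsigned long long)preserved);
	return 0;
}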


@ -376,12 +376,15 @@ out_err:
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
nouveau_pci_resume(pdev);
drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
drm_kms_helper_poll_disable(dev);
nouveau_pci_suspend(pdev, pmm);
}
}
@ -913,6 +916,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = dev_priv->vm_vram_base;
break;
case NOUVEAU_GETPARAM_PTIMER_TIME:
getparam->value = dev_priv->engine.timer.read(dev);
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card


@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
static void
nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
NV_PRAMDAC_CU_START_POS,
XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |


@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
struct drm_device *dev = nv_crtc->base.dev;
nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
((y & 0xFFFF) << 16) | (x & 0xFFFF));
/* Needed to make the cursor move. */


@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
int
nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_encoder *nv_encoder = NULL;
struct drm_encoder *encoder;
bool dum;
@ -324,11 +323,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
uint32_t tmp;
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
else
tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
switch ((tmp & 0x00000f00) >> 8) {
case 8:


@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
$(call if_changed,mkregtable)
$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
$(call if_changed,mkregtable)
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o
evergreen.o evergreen_cs.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o


@ -41,7 +41,12 @@ void evergreen_fini(struct radeon_device *rdev);
void evergreen_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
radeon_atom_set_voltage(rdev, voltage->voltage);
}
void evergreen_pm_prepare(struct radeon_device *rdev)

(File diff suppressed because it is too large.)


@ -151,6 +151,9 @@
#define EVERGREEN_DATA_FORMAT 0x6b00
# define EVERGREEN_INTERLEAVE_EN (1 << 0)
#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
#define EVERGREEN_VLINE_START_END 0x6b08
#define EVERGREEN_VLINE_STATUS 0x6bb8
# define EVERGREEN_VLINE_STAT (1 << 12)
#define EVERGREEN_VIEWPORT_START 0x6d70
#define EVERGREEN_VIEWPORT_SIZE 0x6d74


@ -218,6 +218,8 @@
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
#define PA_SC_AA_CONFIG 0x28C04
#define MSAA_NUM_SAMPLES_SHIFT 0
#define MSAA_NUM_SAMPLES_MASK 0x3
#define PA_SC_CLIPRECT_RULE 0x2820C
#define PA_SC_EDGERULE 0x28230
#define PA_SC_FIFO_SIZE 0x8BCC
@ -553,4 +555,466 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
/*
* PM4
*/
#define PACKET_TYPE0 0
#define PACKET_TYPE1 1
#define PACKET_TYPE2 2
#define PACKET_TYPE3 3
#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
#define PACKET2_PAD_SHIFT 0
#define PACKET2_PAD_MASK (0x3fffffff << 0)
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
#define PACKET3_CLEAR_STATE 0x12
#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
#define PACKET3_PRED_EXEC 0x23
#define PACKET3_DRAW_INDIRECT 0x24
#define PACKET3_DRAW_INDEX_INDIRECT 0x25
#define PACKET3_INDEX_BASE 0x26
#define PACKET3_DRAW_INDEX_2 0x27
#define PACKET3_CONTEXT_CONTROL 0x28
#define PACKET3_DRAW_INDEX_OFFSET 0x29
#define PACKET3_INDEX_TYPE 0x2A
#define PACKET3_DRAW_INDEX 0x2B
#define PACKET3_DRAW_INDEX_AUTO 0x2D
#define PACKET3_DRAW_INDEX_IMMD 0x2E
#define PACKET3_NUM_INSTANCES 0x2F
#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
# define PACKET3_CB11_DEST_BASE_ENA (1 << 17)
# define PACKET3_FULL_CACHE_ENA (1 << 20)
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
# define PACKET3_SMX_ACTION_ENA (1 << 28)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
#define PACKET3_EVENT_WRITE_EOP 0x47
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
#define PACKET3_RB_OFFSET 0x4B
#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
#define PACKET3_ONE_REG_WRITE 0x57
#define PACKET3_SET_CONFIG_REG 0x68
#define PACKET3_SET_CONFIG_REG_START 0x00008000
#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
#define PACKET3_SET_CONTEXT_REG 0x69
#define PACKET3_SET_CONTEXT_REG_START 0x00028000
#define PACKET3_SET_CONTEXT_REG_END 0x00029000
#define PACKET3_SET_ALU_CONST 0x6A
/* alu const buffers only; no reg file */
#define PACKET3_SET_BOOL_CONST 0x6B
#define PACKET3_SET_BOOL_CONST_START 0x0003a500
#define PACKET3_SET_BOOL_CONST_END 0x0003a518
#define PACKET3_SET_LOOP_CONST 0x6C
#define PACKET3_SET_LOOP_CONST_START 0x0003a200
#define PACKET3_SET_LOOP_CONST_END 0x0003a500
#define PACKET3_SET_RESOURCE 0x6D
#define PACKET3_SET_RESOURCE_START 0x00030000
#define PACKET3_SET_RESOURCE_END 0x00038000
#define PACKET3_SET_SAMPLER 0x6E
#define PACKET3_SET_SAMPLER_START 0x0003c000
#define PACKET3_SET_SAMPLER_END 0x0003c600
#define PACKET3_SET_CTL_CONST 0x6F
#define PACKET3_SET_CTL_CONST_START 0x0003cff0
#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
#define PACKET3_SET_RESOURCE_OFFSET 0x70
#define PACKET3_SET_ALU_CONST_VS 0x71
#define PACKET3_SET_ALU_CONST_DI 0x72
#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
#define PACKET3_SET_RESOURCE_INDIRECT 0x74
#define PACKET3_SET_APPEND_CNT 0x75
#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3)
#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
#define SQ_TEX_VTX_INVALID_BUFFER 0x1
#define SQ_TEX_VTX_VALID_TEXTURE 0x2
#define SQ_TEX_VTX_VALID_BUFFER 0x3
#define SQ_CONST_MEM_BASE 0x8df8
#define SQ_ESGS_RING_SIZE 0x8c44
#define SQ_GSVS_RING_SIZE 0x8c4c
#define SQ_ESTMP_RING_SIZE 0x8c54
#define SQ_GSTMP_RING_SIZE 0x8c5c
#define SQ_VSTMP_RING_SIZE 0x8c64
#define SQ_PSTMP_RING_SIZE 0x8c6c
#define SQ_LSTMP_RING_SIZE 0x8e14
#define SQ_HSTMP_RING_SIZE 0x8e1c
#define VGT_TF_RING_SIZE 0x8988
#define SQ_ESGS_RING_ITEMSIZE 0x28900
#define SQ_GSVS_RING_ITEMSIZE 0x28904
#define SQ_ESTMP_RING_ITEMSIZE 0x28908
#define SQ_GSTMP_RING_ITEMSIZE 0x2890c
#define SQ_VSTMP_RING_ITEMSIZE 0x28910
#define SQ_PSTMP_RING_ITEMSIZE 0x28914
#define SQ_LSTMP_RING_ITEMSIZE 0x28830
#define SQ_HSTMP_RING_ITEMSIZE 0x28834
#define SQ_GS_VERT_ITEMSIZE 0x2891c
#define SQ_GS_VERT_ITEMSIZE_1 0x28920
#define SQ_GS_VERT_ITEMSIZE_2 0x28924
#define SQ_GS_VERT_ITEMSIZE_3 0x28928
#define SQ_GSVS_RING_OFFSET_1 0x2892c
#define SQ_GSVS_RING_OFFSET_2 0x28930
#define SQ_GSVS_RING_OFFSET_3 0x28934
#define SQ_ALU_CONST_CACHE_PS_0 0x28940
#define SQ_ALU_CONST_CACHE_PS_1 0x28944
#define SQ_ALU_CONST_CACHE_PS_2 0x28948
#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
#define SQ_ALU_CONST_CACHE_PS_4 0x28950
#define SQ_ALU_CONST_CACHE_PS_5 0x28954
#define SQ_ALU_CONST_CACHE_PS_6 0x28958
#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
#define SQ_ALU_CONST_CACHE_PS_8 0x28960
#define SQ_ALU_CONST_CACHE_PS_9 0x28964
#define SQ_ALU_CONST_CACHE_PS_10 0x28968
#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
#define SQ_ALU_CONST_CACHE_PS_12 0x28970
#define SQ_ALU_CONST_CACHE_PS_13 0x28974
#define SQ_ALU_CONST_CACHE_PS_14 0x28978
#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
#define SQ_ALU_CONST_CACHE_VS_0 0x28980
#define SQ_ALU_CONST_CACHE_VS_1 0x28984
#define SQ_ALU_CONST_CACHE_VS_2 0x28988
#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
#define SQ_ALU_CONST_CACHE_VS_4 0x28990
#define SQ_ALU_CONST_CACHE_VS_5 0x28994
#define SQ_ALU_CONST_CACHE_VS_6 0x28998
#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
#define SQ_ALU_CONST_CACHE_HS_0 0x28f00
#define SQ_ALU_CONST_CACHE_HS_1 0x28f04
#define SQ_ALU_CONST_CACHE_HS_2 0x28f08
#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c
#define SQ_ALU_CONST_CACHE_HS_4 0x28f10
#define SQ_ALU_CONST_CACHE_HS_5 0x28f14
#define SQ_ALU_CONST_CACHE_HS_6 0x28f18
#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c
#define SQ_ALU_CONST_CACHE_HS_8 0x28f20
#define SQ_ALU_CONST_CACHE_HS_9 0x28f24
#define SQ_ALU_CONST_CACHE_HS_10 0x28f28
#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c
#define SQ_ALU_CONST_CACHE_HS_12 0x28f30
#define SQ_ALU_CONST_CACHE_HS_13 0x28f34
#define SQ_ALU_CONST_CACHE_HS_14 0x28f38
#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c
#define SQ_ALU_CONST_CACHE_LS_0 0x28f40
#define SQ_ALU_CONST_CACHE_LS_1 0x28f44
#define SQ_ALU_CONST_CACHE_LS_2 0x28f48
#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c
#define SQ_ALU_CONST_CACHE_LS_4 0x28f50
#define SQ_ALU_CONST_CACHE_LS_5 0x28f54
#define SQ_ALU_CONST_CACHE_LS_6 0x28f58
#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c
#define SQ_ALU_CONST_CACHE_LS_8 0x28f60
#define SQ_ALU_CONST_CACHE_LS_9 0x28f64
#define SQ_ALU_CONST_CACHE_LS_10 0x28f68
#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c
#define SQ_ALU_CONST_CACHE_LS_12 0x28f70
#define SQ_ALU_CONST_CACHE_LS_13 0x28f74
#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
#define DB_DEPTH_CONTROL 0x28800
#define DB_DEPTH_VIEW 0x28008
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
#define DB_Z_WRITE_BASE 0x28050
#define DB_STENCIL_WRITE_BASE 0x28054
#define DB_DEPTH_SIZE 0x28058
#define SQ_PGM_START_PS 0x28840
#define SQ_PGM_START_VS 0x2885c
#define SQ_PGM_START_GS 0x28874
#define SQ_PGM_START_ES 0x2888c
#define SQ_PGM_START_FS 0x288a4
#define SQ_PGM_START_HS 0x288b8
#define SQ_PGM_START_LS 0x288d0
#define VGT_STRMOUT_CONFIG 0x28b94
#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
#define CB_TARGET_MASK 0x28238
#define CB_SHADER_MASK 0x2823c
#define GDS_ADDR_BASE 0x28720
#define CB_IMMED0_BASE 0x28b9c
#define CB_IMMED1_BASE 0x28ba0
#define CB_IMMED2_BASE 0x28ba4
#define CB_IMMED3_BASE 0x28ba8
#define CB_IMMED4_BASE 0x28bac
#define CB_IMMED5_BASE 0x28bb0
#define CB_IMMED6_BASE 0x28bb4
#define CB_IMMED7_BASE 0x28bb8
#define CB_IMMED8_BASE 0x28bbc
#define CB_IMMED9_BASE 0x28bc0
#define CB_IMMED10_BASE 0x28bc4
#define CB_IMMED11_BASE 0x28bc8
/* all 12 CB blocks have these regs */
#define CB_COLOR0_BASE 0x28c60
#define CB_COLOR0_PITCH 0x28c64
#define CB_COLOR0_SLICE 0x28c68
#define CB_COLOR0_VIEW 0x28c6c
#define CB_COLOR0_INFO 0x28c70
# define CB_ARRAY_MODE(x) ((x) << 8)
# define ARRAY_LINEAR_GENERAL 0
# define ARRAY_LINEAR_ALIGNED 1
# define ARRAY_1D_TILED_THIN1 2
# define ARRAY_2D_TILED_THIN1 4
#define CB_COLOR0_ATTRIB 0x28c74
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
#define CB_COLOR0_CMASK_SLICE 0x28c80
#define CB_COLOR0_FMASK 0x28c84
#define CB_COLOR0_FMASK_SLICE 0x28c88
#define CB_COLOR0_CLEAR_WORD0 0x28c8c
#define CB_COLOR0_CLEAR_WORD1 0x28c90
#define CB_COLOR0_CLEAR_WORD2 0x28c94
#define CB_COLOR0_CLEAR_WORD3 0x28c98
#define CB_COLOR1_BASE 0x28c9c
#define CB_COLOR2_BASE 0x28cd8
#define CB_COLOR3_BASE 0x28d14
#define CB_COLOR4_BASE 0x28d50
#define CB_COLOR5_BASE 0x28d8c
#define CB_COLOR6_BASE 0x28dc8
#define CB_COLOR7_BASE 0x28e04
#define CB_COLOR8_BASE 0x28e40
#define CB_COLOR9_BASE 0x28e5c
#define CB_COLOR10_BASE 0x28e78
#define CB_COLOR11_BASE 0x28e94
#define CB_COLOR1_PITCH 0x28ca0
#define CB_COLOR2_PITCH 0x28cdc
#define CB_COLOR3_PITCH 0x28d18
#define CB_COLOR4_PITCH 0x28d54
#define CB_COLOR5_PITCH 0x28d90
#define CB_COLOR6_PITCH 0x28dcc
#define CB_COLOR7_PITCH 0x28e08
#define CB_COLOR8_PITCH 0x28e44
#define CB_COLOR9_PITCH 0x28e60
#define CB_COLOR10_PITCH 0x28e7c
#define CB_COLOR11_PITCH 0x28e98
#define CB_COLOR1_SLICE 0x28ca4
#define CB_COLOR2_SLICE 0x28ce0
#define CB_COLOR3_SLICE 0x28d1c
#define CB_COLOR4_SLICE 0x28d58
#define CB_COLOR5_SLICE 0x28d94
#define CB_COLOR6_SLICE 0x28dd0
#define CB_COLOR7_SLICE 0x28e0c
#define CB_COLOR8_SLICE 0x28e48
#define CB_COLOR9_SLICE 0x28e64
#define CB_COLOR10_SLICE 0x28e80
#define CB_COLOR11_SLICE 0x28e9c
#define CB_COLOR1_VIEW 0x28ca8
#define CB_COLOR2_VIEW 0x28ce4
#define CB_COLOR3_VIEW 0x28d20
#define CB_COLOR4_VIEW 0x28d5c
#define CB_COLOR5_VIEW 0x28d98
#define CB_COLOR6_VIEW 0x28dd4
#define CB_COLOR7_VIEW 0x28e10
#define CB_COLOR8_VIEW 0x28e4c
#define CB_COLOR9_VIEW 0x28e68
#define CB_COLOR10_VIEW 0x28e84
#define CB_COLOR11_VIEW 0x28ea0
#define CB_COLOR1_INFO 0x28cac
#define CB_COLOR2_INFO 0x28ce8
#define CB_COLOR3_INFO 0x28d24
#define CB_COLOR4_INFO 0x28d60
#define CB_COLOR5_INFO 0x28d9c
#define CB_COLOR6_INFO 0x28dd8
#define CB_COLOR7_INFO 0x28e14
#define CB_COLOR8_INFO 0x28e50
#define CB_COLOR9_INFO 0x28e6c
#define CB_COLOR10_INFO 0x28e88
#define CB_COLOR11_INFO 0x28ea4
#define CB_COLOR1_ATTRIB 0x28cb0
#define CB_COLOR2_ATTRIB 0x28cec
#define CB_COLOR3_ATTRIB 0x28d28
#define CB_COLOR4_ATTRIB 0x28d64
#define CB_COLOR5_ATTRIB 0x28da0
#define CB_COLOR6_ATTRIB 0x28ddc
#define CB_COLOR7_ATTRIB 0x28e18
#define CB_COLOR8_ATTRIB 0x28e54
#define CB_COLOR9_ATTRIB 0x28e70
#define CB_COLOR10_ATTRIB 0x28e8c
#define CB_COLOR11_ATTRIB 0x28ea8
#define CB_COLOR1_DIM 0x28cb4
#define CB_COLOR2_DIM 0x28cf0
#define CB_COLOR3_DIM 0x28d2c
#define CB_COLOR4_DIM 0x28d68
#define CB_COLOR5_DIM 0x28da4
#define CB_COLOR6_DIM 0x28de0
#define CB_COLOR7_DIM 0x28e1c
#define CB_COLOR8_DIM 0x28e58
#define CB_COLOR9_DIM 0x28e74
#define CB_COLOR10_DIM 0x28e90
#define CB_COLOR11_DIM 0x28eac
#define CB_COLOR1_CMASK 0x28cb8
#define CB_COLOR2_CMASK 0x28cf4
#define CB_COLOR3_CMASK 0x28d30
#define CB_COLOR4_CMASK 0x28d6c
#define CB_COLOR5_CMASK 0x28da8
#define CB_COLOR6_CMASK 0x28de4
#define CB_COLOR7_CMASK 0x28e20
#define CB_COLOR1_CMASK_SLICE 0x28cbc
#define CB_COLOR2_CMASK_SLICE 0x28cf8
#define CB_COLOR3_CMASK_SLICE 0x28d34
#define CB_COLOR4_CMASK_SLICE 0x28d70
#define CB_COLOR5_CMASK_SLICE 0x28dac
#define CB_COLOR6_CMASK_SLICE 0x28de8
#define CB_COLOR7_CMASK_SLICE 0x28e24
#define CB_COLOR1_FMASK 0x28cc0
#define CB_COLOR2_FMASK 0x28cfc
#define CB_COLOR3_FMASK 0x28d38
#define CB_COLOR4_FMASK 0x28d74
#define CB_COLOR5_FMASK 0x28db0
#define CB_COLOR6_FMASK 0x28dec
#define CB_COLOR7_FMASK 0x28e28
#define CB_COLOR1_FMASK_SLICE 0x28cc4
#define CB_COLOR2_FMASK_SLICE 0x28d00
#define CB_COLOR3_FMASK_SLICE 0x28d3c
#define CB_COLOR4_FMASK_SLICE 0x28d78
#define CB_COLOR5_FMASK_SLICE 0x28db4
#define CB_COLOR6_FMASK_SLICE 0x28df0
#define CB_COLOR7_FMASK_SLICE 0x28e2c
#define CB_COLOR1_CLEAR_WORD0 0x28cc8
#define CB_COLOR2_CLEAR_WORD0 0x28d04
#define CB_COLOR3_CLEAR_WORD0 0x28d40
#define CB_COLOR4_CLEAR_WORD0 0x28d7c
#define CB_COLOR5_CLEAR_WORD0 0x28db8
#define CB_COLOR6_CLEAR_WORD0 0x28df4
#define CB_COLOR7_CLEAR_WORD0 0x28e30
#define CB_COLOR1_CLEAR_WORD1 0x28ccc
#define CB_COLOR2_CLEAR_WORD1 0x28d08
#define CB_COLOR3_CLEAR_WORD1 0x28d44
#define CB_COLOR4_CLEAR_WORD1 0x28d80
#define CB_COLOR5_CLEAR_WORD1 0x28dbc
#define CB_COLOR6_CLEAR_WORD1 0x28df8
#define CB_COLOR7_CLEAR_WORD1 0x28e34
#define CB_COLOR1_CLEAR_WORD2 0x28cd0
#define CB_COLOR2_CLEAR_WORD2 0x28d0c
#define CB_COLOR3_CLEAR_WORD2 0x28d48
#define CB_COLOR4_CLEAR_WORD2 0x28d84
#define CB_COLOR5_CLEAR_WORD2 0x28dc0
#define CB_COLOR6_CLEAR_WORD2 0x28dfc
#define CB_COLOR7_CLEAR_WORD2 0x28e38
#define CB_COLOR1_CLEAR_WORD3 0x28cd4
#define CB_COLOR2_CLEAR_WORD3 0x28d10
#define CB_COLOR3_CLEAR_WORD3 0x28d4c
#define CB_COLOR4_CLEAR_WORD3 0x28d88
#define CB_COLOR5_CLEAR_WORD3 0x28dc4
#define CB_COLOR6_CLEAR_WORD3 0x28e00
#define CB_COLOR7_CLEAR_WORD3 0x28e3c
#define SQ_TEX_RESOURCE_WORD0_0 0x30000
#define SQ_TEX_RESOURCE_WORD1_0 0x30004
# define TEX_ARRAY_MODE(x) ((x) << 28)
#define SQ_TEX_RESOURCE_WORD2_0 0x30008
#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
#define SQ_TEX_RESOURCE_WORD4_0 0x30010
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
#endif
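The PM4 macros added near the top of this header (PACKET0/PACKET2/PACKET3 and the CP_PACKET_GET_* decoders) describe the command-packet headers that the new Evergreen CS parser walks. A small host-side sketch that builds one packet-3 header and decodes it back with the same bit layout (constants copied from above; this is not driver code):

#include <stdio.h>
#include <stdint.h>

/* Same layout as above: type in bits [31:30], count in [29:16],
 * packet-3 opcode in [15:8]. */
#define PACKET_TYPE3			3u
#define PACKET3_SET_CONTEXT_REG		0x69
#define PACKET3(op, n)	((PACKET_TYPE3 << 30) | (((op) & 0xFF) << 8) | \
			 (((n) & 0x3FFF) << 16))
#define CP_PACKET_GET_TYPE(h)		(((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)		(((h) >> 16) & 0x3FFF)
#define CP_PACKET3_GET_OPCODE(h)	(((h) >> 8) & 0xFF)

int main(void)
{
	/* Header for a SET_CONTEXT_REG packet with a count field of 2. */
	uint32_t hdr = PACKET3(PACKET3_SET_CONTEXT_REG, 2);

	printf("header 0x%08x type %u opcode 0x%02x count %u\n",
	       hdr, CP_PACKET_GET_TYPE(hdr),
	       CP_PACKET3_GET_OPCODE(hdr), CP_PACKET_GET_COUNT(hdr));
	return 0;
}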


@ -475,6 +475,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
void r600_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
radeon_atom_set_voltage(rdev, voltage->voltage);
}


@ -176,6 +176,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
/*
* Fences.


@ -724,8 +724,8 @@ static struct radeon_asic evergreen_asic = {
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = NULL,
.cs_parse = NULL,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = NULL,
.copy_dma = NULL,
.copy = NULL,


@ -314,6 +314,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
int evergreen_irq_set(struct radeon_device *rdev);
int evergreen_irq_process(struct radeon_device *rdev);
extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);


@ -1538,7 +1538,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].pcie_lanes =
power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@ -1605,7 +1606,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@ -1679,7 +1681,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@ -1755,9 +1758,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].misc2 = 0;
}
} else {
int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
uint8_t fw_frev, fw_crev;
uint16_t fw_data_offset, vddc = 0;
union firmware_info *firmware_info;
ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
&fw_frev, &fw_crev, &fw_data_offset)) {
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
fw_data_offset);
vddc = firmware_info->info_14.usBootUpVDDCVoltage;
}
/* add the i2c bus for thermal/fan chip */
/* no support for internal controller yet */
ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
if (controller->ucType > 0) {
if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
(controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
@ -1904,6 +1920,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
/* patch the table values with the default slck/mclk from firmware info */
for (j = 0; j < mode_index; j++) {
rdev->pm.power_state[state_index].clock_info[j].mclk =
rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[j].sclk =
rdev->clock.default_sclk;
if (vddc)
rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
vddc;
}
}
state_index++;
}
@ -1998,6 +2024,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
union set_voltage {
struct _SET_VOLTAGE_PS_ALLOCATION alloc;
struct _SET_VOLTAGE_PARAMETERS v1;
struct _SET_VOLTAGE_PARAMETERS_V2 v2;
};
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
u8 frev, crev, volt_index = level;
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
switch (crev) {
case 1:
args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
args.v1.ucVoltageIndex = volt_index;
break;
case 2:
args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
args.v2.usVoltageLevel = cpu_to_le16(level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;


@ -2454,7 +2454,12 @@ default_mode:
rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
if ((state_index > 0) &&
(rdev->pm.power_state[0].clock_info[0].voltage.type = VOLTAGE_GPIO))
rdev->pm.power_state[state_index].clock_info[0].voltage =
rdev->pm.power_state[0].clock_info[0].voltage;
else
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
rdev->pm.power_state[state_index].pcie_lanes = 16;
rdev->pm.power_state[state_index].flags = 0;
rdev->pm.default_power_state_index = state_index;


@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
rdev->powered_down = false;
radeon_resume_kms(dev);
drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_INFO "radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
radeon_suspend_kms(dev, pmm);
/* don't suspend or resume card normally */
rdev->powered_down = true;
@ -711,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@ -723,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (rdev->powered_down)
return 0;
/* turn off display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);


@ -151,6 +151,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev)
static void radeon_set_power_state(struct radeon_device *rdev)
{
u32 sclk, mclk;
bool misc_after = false;
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
@ -167,55 +168,47 @@ static void radeon_set_power_state(struct radeon_device *rdev)
if (mclk > rdev->clock.default_mclk)
mclk = rdev->clock.default_mclk;
/* voltage, pcie lanes, etc.*/
radeon_pm_misc(rdev);
/* upvolt before raising clocks, downvolt after lowering clocks */
if (sclk < rdev->pm.current_sclk)
misc_after = true;
radeon_sync_with_vblank(rdev);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
radeon_sync_with_vblank(rdev);
if (!radeon_pm_in_vbl(rdev))
return;
radeon_pm_prepare(rdev);
/* set engine clock */
if (sclk != rdev->pm.current_sclk) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_engine_clock(rdev, sclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_sclk = sclk;
DRM_DEBUG("Setting: e: %d\n", sclk);
}
/* set memory clock */
if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_mclk = mclk;
DRM_DEBUG("Setting: m: %d\n", mclk);
}
radeon_pm_finish(rdev);
} else {
/* set engine clock */
if (sclk != rdev->pm.current_sclk) {
radeon_sync_with_vblank(rdev);
radeon_pm_prepare(rdev);
radeon_set_engine_clock(rdev, sclk);
radeon_pm_finish(rdev);
rdev->pm.current_sclk = sclk;
DRM_DEBUG("Setting: e: %d\n", sclk);
}
/* set memory clock */
if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_sync_with_vblank(rdev);
radeon_pm_prepare(rdev);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_finish(rdev);
rdev->pm.current_mclk = mclk;
DRM_DEBUG("Setting: m: %d\n", mclk);
}
}
radeon_pm_prepare(rdev);
if (!misc_after)
/* voltage, pcie lanes, etc.*/
radeon_pm_misc(rdev);
/* set engine clock */
if (sclk != rdev->pm.current_sclk) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_engine_clock(rdev, sclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_sclk = sclk;
DRM_DEBUG("Setting: e: %d\n", sclk);
}
/* set memory clock */
if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_mclk = mclk;
DRM_DEBUG("Setting: m: %d\n", mclk);
}
if (misc_after)
/* voltage, pcie lanes, etc.*/
radeon_pm_misc(rdev);
radeon_pm_finish(rdev);
rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
} else


@ -0,0 +1,611 @@
evergreen 0x9400
0x00008040 WAIT_UNTIL
0x00008044 WAIT_UNTIL_POLL_CNTL
0x00008048 WAIT_UNTIL_POLL_MASK
0x0000804c WAIT_UNTIL_POLL_REFDATA
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x000088C4 VGT_CACHE_INVALIDATION
0x000088D4 VGT_GS_VERTEX_REUSE
0x00008958 VGT_PRIMITIVE_TYPE
0x0000895C VGT_INDEX_TYPE
0x00008970 VGT_NUM_INDICES
0x00008974 VGT_NUM_INSTANCES
0x00008990 VGT_COMPUTE_DIM_X
0x00008994 VGT_COMPUTE_DIM_Y
0x00008998 VGT_COMPUTE_DIM_Z
0x0000899C VGT_COMPUTE_START_X
0x000089A0 VGT_COMPUTE_START_Y
0x000089A4 VGT_COMPUTE_START_Z
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x00008A14 PA_CL_ENHANCE
0x00008A60 PA_SC_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
0x00008C00 SQ_CONFIG
0x00008C04 SQ_GPR_RESOURCE_MGMT_1
0x00008C08 SQ_GPR_RESOURCE_MGMT_2
0x00008C0C SQ_GPR_RESOURCE_MGMT_3
0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
0x00008C18 SQ_THREAD_RESOURCE_MGMT
0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
0x00008C20 SQ_STACK_RESOURCE_MGMT_1
0x00008C24 SQ_STACK_RESOURCE_MGMT_2
0x00008C28 SQ_STACK_RESOURCE_MGMT_3
0x00008DF8 SQ_CONST_MEM_BASE
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x00009700 VC_CNTL
0x00009714 VC_ENHANCE
0x00009830 DB_DEBUG
0x00009834 DB_DEBUG2
0x00009838 DB_DEBUG3
0x0000983C DB_DEBUG4
0x00009854 DB_WATERMARKS
0x0000A400 TD_PS_BORDER_COLOR_INDEX
0x0000A404 TD_PS_BORDER_COLOR_RED
0x0000A408 TD_PS_BORDER_COLOR_GREEN
0x0000A40C TD_PS_BORDER_COLOR_BLUE
0x0000A410 TD_PS_BORDER_COLOR_ALPHA
0x0000A414 TD_VS_BORDER_COLOR_INDEX
0x0000A418 TD_VS_BORDER_COLOR_RED
0x0000A41C TD_VS_BORDER_COLOR_GREEN
0x0000A420 TD_VS_BORDER_COLOR_BLUE
0x0000A424 TD_VS_BORDER_COLOR_ALPHA
0x0000A428 TD_GS_BORDER_COLOR_INDEX
0x0000A42C TD_GS_BORDER_COLOR_RED
0x0000A430 TD_GS_BORDER_COLOR_GREEN
0x0000A434 TD_GS_BORDER_COLOR_BLUE
0x0000A438 TD_GS_BORDER_COLOR_ALPHA
0x0000A43C TD_HS_BORDER_COLOR_INDEX
0x0000A440 TD_HS_BORDER_COLOR_RED
0x0000A444 TD_HS_BORDER_COLOR_GREEN
0x0000A448 TD_HS_BORDER_COLOR_BLUE
0x0000A44C TD_HS_BORDER_COLOR_ALPHA
0x0000A450 TD_LS_BORDER_COLOR_INDEX
0x0000A454 TD_LS_BORDER_COLOR_RED
0x0000A458 TD_LS_BORDER_COLOR_GREEN
0x0000A45C TD_LS_BORDER_COLOR_BLUE
0x0000A460 TD_LS_BORDER_COLOR_ALPHA
0x0000A464 TD_CS_BORDER_COLOR_INDEX
0x0000A468 TD_CS_BORDER_COLOR_RED
0x0000A46C TD_CS_BORDER_COLOR_GREEN
0x0000A470 TD_CS_BORDER_COLOR_BLUE
0x0000A474 TD_CS_BORDER_COLOR_ALPHA
0x00028000 DB_RENDER_CONTROL
0x00028004 DB_COUNT_CONTROL
0x0002800C DB_RENDER_OVERRIDE
0x00028010 DB_RENDER_OVERRIDE2
0x00028028 DB_STENCIL_CLEAR
0x0002802C DB_DEPTH_CLEAR
0x00028034 PA_SC_SCREEN_SCISSOR_BR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x0002805C DB_DEPTH_SLICE
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
0x00028200 PA_SC_WINDOW_OFFSET
0x00028204 PA_SC_WINDOW_SCISSOR_TL
0x00028208 PA_SC_WINDOW_SCISSOR_BR
0x0002820C PA_SC_CLIPRECT_RULE
0x00028210 PA_SC_CLIPRECT_0_TL
0x00028214 PA_SC_CLIPRECT_0_BR
0x00028218 PA_SC_CLIPRECT_1_TL
0x0002821C PA_SC_CLIPRECT_1_BR
0x00028220 PA_SC_CLIPRECT_2_TL
0x00028224 PA_SC_CLIPRECT_2_BR
0x00028228 PA_SC_CLIPRECT_3_TL
0x0002822C PA_SC_CLIPRECT_3_BR
0x00028230 PA_SC_EDGERULE
0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
0x00028240 PA_SC_GENERIC_SCISSOR_TL
0x00028244 PA_SC_GENERIC_SCISSOR_BR
0x00028250 PA_SC_VPORT_SCISSOR_0_TL
0x00028254 PA_SC_VPORT_SCISSOR_0_BR
0x00028258 PA_SC_VPORT_SCISSOR_1_TL
0x0002825C PA_SC_VPORT_SCISSOR_1_BR
0x00028260 PA_SC_VPORT_SCISSOR_2_TL
0x00028264 PA_SC_VPORT_SCISSOR_2_BR
0x00028268 PA_SC_VPORT_SCISSOR_3_TL
0x0002826C PA_SC_VPORT_SCISSOR_3_BR
0x00028270 PA_SC_VPORT_SCISSOR_4_TL
0x00028274 PA_SC_VPORT_SCISSOR_4_BR
0x00028278 PA_SC_VPORT_SCISSOR_5_TL
0x0002827C PA_SC_VPORT_SCISSOR_5_BR
0x00028280 PA_SC_VPORT_SCISSOR_6_TL
0x00028284 PA_SC_VPORT_SCISSOR_6_BR
0x00028288 PA_SC_VPORT_SCISSOR_7_TL
0x0002828C PA_SC_VPORT_SCISSOR_7_BR
0x00028290 PA_SC_VPORT_SCISSOR_8_TL
0x00028294 PA_SC_VPORT_SCISSOR_8_BR
0x00028298 PA_SC_VPORT_SCISSOR_9_TL
0x0002829C PA_SC_VPORT_SCISSOR_9_BR
0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
0x000282AC PA_SC_VPORT_SCISSOR_11_BR
0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
0x000282BC PA_SC_VPORT_SCISSOR_13_BR
0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
0x000282CC PA_SC_VPORT_SCISSOR_15_BR
0x000282D0 PA_SC_VPORT_ZMIN_0
0x000282D4 PA_SC_VPORT_ZMAX_0
0x000282D8 PA_SC_VPORT_ZMIN_1
0x000282DC PA_SC_VPORT_ZMAX_1
0x000282E0 PA_SC_VPORT_ZMIN_2
0x000282E4 PA_SC_VPORT_ZMAX_2
0x000282E8 PA_SC_VPORT_ZMIN_3
0x000282EC PA_SC_VPORT_ZMAX_3
0x000282F0 PA_SC_VPORT_ZMIN_4
0x000282F4 PA_SC_VPORT_ZMAX_4
0x000282F8 PA_SC_VPORT_ZMIN_5
0x000282FC PA_SC_VPORT_ZMAX_5
0x00028300 PA_SC_VPORT_ZMIN_6
0x00028304 PA_SC_VPORT_ZMAX_6
0x00028308 PA_SC_VPORT_ZMIN_7
0x0002830C PA_SC_VPORT_ZMAX_7
0x00028310 PA_SC_VPORT_ZMIN_8
0x00028314 PA_SC_VPORT_ZMAX_8
0x00028318 PA_SC_VPORT_ZMIN_9
0x0002831C PA_SC_VPORT_ZMAX_9
0x00028320 PA_SC_VPORT_ZMIN_10
0x00028324 PA_SC_VPORT_ZMAX_10
0x00028328 PA_SC_VPORT_ZMIN_11
0x0002832C PA_SC_VPORT_ZMAX_11
0x00028330 PA_SC_VPORT_ZMIN_12
0x00028334 PA_SC_VPORT_ZMAX_12
0x00028338 PA_SC_VPORT_ZMIN_13
0x0002833C PA_SC_VPORT_ZMAX_13
0x00028340 PA_SC_VPORT_ZMIN_14
0x00028344 PA_SC_VPORT_ZMAX_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028350 SX_MISC
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
0x0002838C SQ_VTX_SEMANTIC_3
0x00028390 SQ_VTX_SEMANTIC_4
0x00028394 SQ_VTX_SEMANTIC_5
0x00028398 SQ_VTX_SEMANTIC_6
0x0002839C SQ_VTX_SEMANTIC_7
0x000283A0 SQ_VTX_SEMANTIC_8
0x000283A4 SQ_VTX_SEMANTIC_9
0x000283A8 SQ_VTX_SEMANTIC_10
0x000283AC SQ_VTX_SEMANTIC_11
0x000283B0 SQ_VTX_SEMANTIC_12
0x000283B4 SQ_VTX_SEMANTIC_13
0x000283B8 SQ_VTX_SEMANTIC_14
0x000283BC SQ_VTX_SEMANTIC_15
0x000283C0 SQ_VTX_SEMANTIC_16
0x000283C4 SQ_VTX_SEMANTIC_17
0x000283C8 SQ_VTX_SEMANTIC_18
0x000283CC SQ_VTX_SEMANTIC_19
0x000283D0 SQ_VTX_SEMANTIC_20
0x000283D4 SQ_VTX_SEMANTIC_21
0x000283D8 SQ_VTX_SEMANTIC_22
0x000283DC SQ_VTX_SEMANTIC_23
0x000283E0 SQ_VTX_SEMANTIC_24
0x000283E4 SQ_VTX_SEMANTIC_25
0x000283E8 SQ_VTX_SEMANTIC_26
0x000283EC SQ_VTX_SEMANTIC_27
0x000283F0 SQ_VTX_SEMANTIC_28
0x000283F4 SQ_VTX_SEMANTIC_29
0x000283F8 SQ_VTX_SEMANTIC_30
0x000283FC SQ_VTX_SEMANTIC_31
0x00028400 VGT_MAX_VTX_INDX
0x00028404 VGT_MIN_VTX_INDX
0x00028408 VGT_INDX_OFFSET
0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
0x00028410 SX_ALPHA_TEST_CONTROL
0x00028414 CB_BLEND_RED
0x00028418 CB_BLEND_GREEN
0x0002841C CB_BLEND_BLUE
0x00028420 CB_BLEND_ALPHA
0x00028430 DB_STENCILREFMASK
0x00028434 DB_STENCILREFMASK_BF
0x00028438 SX_ALPHA_REF
0x0002843C PA_CL_VPORT_XSCALE_0
0x00028440 PA_CL_VPORT_XOFFSET_0
0x00028444 PA_CL_VPORT_YSCALE_0
0x00028448 PA_CL_VPORT_YOFFSET_0
0x0002844C PA_CL_VPORT_ZSCALE_0
0x00028450 PA_CL_VPORT_ZOFFSET_0
0x00028454 PA_CL_VPORT_XSCALE_1
0x00028458 PA_CL_VPORT_XOFFSET_1
0x0002845C PA_CL_VPORT_YSCALE_1
0x00028460 PA_CL_VPORT_YOFFSET_1
0x00028464 PA_CL_VPORT_ZSCALE_1
0x00028468 PA_CL_VPORT_ZOFFSET_1
0x0002846C PA_CL_VPORT_XSCALE_2
0x00028470 PA_CL_VPORT_XOFFSET_2
0x00028474 PA_CL_VPORT_YSCALE_2
0x00028478 PA_CL_VPORT_YOFFSET_2
0x0002847C PA_CL_VPORT_ZSCALE_2
0x00028480 PA_CL_VPORT_ZOFFSET_2
0x00028484 PA_CL_VPORT_XSCALE_3
0x00028488 PA_CL_VPORT_XOFFSET_3
0x0002848C PA_CL_VPORT_YSCALE_3
0x00028490 PA_CL_VPORT_YOFFSET_3
0x00028494 PA_CL_VPORT_ZSCALE_3
0x00028498 PA_CL_VPORT_ZOFFSET_3
0x0002849C PA_CL_VPORT_XSCALE_4
0x000284A0 PA_CL_VPORT_XOFFSET_4
0x000284A4 PA_CL_VPORT_YSCALE_4
0x000284A8 PA_CL_VPORT_YOFFSET_4
0x000284AC PA_CL_VPORT_ZSCALE_4
0x000284B0 PA_CL_VPORT_ZOFFSET_4
0x000284B4 PA_CL_VPORT_XSCALE_5
0x000284B8 PA_CL_VPORT_XOFFSET_5
0x000284BC PA_CL_VPORT_YSCALE_5
0x000284C0 PA_CL_VPORT_YOFFSET_5
0x000284C4 PA_CL_VPORT_ZSCALE_5
0x000284C8 PA_CL_VPORT_ZOFFSET_5
0x000284CC PA_CL_VPORT_XSCALE_6
0x000284D0 PA_CL_VPORT_XOFFSET_6
0x000284D4 PA_CL_VPORT_YSCALE_6
0x000284D8 PA_CL_VPORT_YOFFSET_6
0x000284DC PA_CL_VPORT_ZSCALE_6
0x000284E0 PA_CL_VPORT_ZOFFSET_6
0x000284E4 PA_CL_VPORT_XSCALE_7
0x000284E8 PA_CL_VPORT_XOFFSET_7
0x000284EC PA_CL_VPORT_YSCALE_7
0x000284F0 PA_CL_VPORT_YOFFSET_7
0x000284F4 PA_CL_VPORT_ZSCALE_7
0x000284F8 PA_CL_VPORT_ZOFFSET_7
0x000284FC PA_CL_VPORT_XSCALE_8
0x00028500 PA_CL_VPORT_XOFFSET_8
0x00028504 PA_CL_VPORT_YSCALE_8
0x00028508 PA_CL_VPORT_YOFFSET_8
0x0002850C PA_CL_VPORT_ZSCALE_8
0x00028510 PA_CL_VPORT_ZOFFSET_8
0x00028514 PA_CL_VPORT_XSCALE_9
0x00028518 PA_CL_VPORT_XOFFSET_9
0x0002851C PA_CL_VPORT_YSCALE_9
0x00028520 PA_CL_VPORT_YOFFSET_9
0x00028524 PA_CL_VPORT_ZSCALE_9
0x00028528 PA_CL_VPORT_ZOFFSET_9
0x0002852C PA_CL_VPORT_XSCALE_10
0x00028530 PA_CL_VPORT_XOFFSET_10
0x00028534 PA_CL_VPORT_YSCALE_10
0x00028538 PA_CL_VPORT_YOFFSET_10
0x0002853C PA_CL_VPORT_ZSCALE_10
0x00028540 PA_CL_VPORT_ZOFFSET_10
0x00028544 PA_CL_VPORT_XSCALE_11
0x00028548 PA_CL_VPORT_XOFFSET_11
0x0002854C PA_CL_VPORT_YSCALE_11
0x00028550 PA_CL_VPORT_YOFFSET_11
0x00028554 PA_CL_VPORT_ZSCALE_11
0x00028558 PA_CL_VPORT_ZOFFSET_11
0x0002855C PA_CL_VPORT_XSCALE_12
0x00028560 PA_CL_VPORT_XOFFSET_12
0x00028564 PA_CL_VPORT_YSCALE_12
0x00028568 PA_CL_VPORT_YOFFSET_12
0x0002856C PA_CL_VPORT_ZSCALE_12
0x00028570 PA_CL_VPORT_ZOFFSET_12
0x00028574 PA_CL_VPORT_XSCALE_13
0x00028578 PA_CL_VPORT_XOFFSET_13
0x0002857C PA_CL_VPORT_YSCALE_13
0x00028580 PA_CL_VPORT_YOFFSET_13
0x00028584 PA_CL_VPORT_ZSCALE_13
0x00028588 PA_CL_VPORT_ZOFFSET_13
0x0002858C PA_CL_VPORT_XSCALE_14
0x00028590 PA_CL_VPORT_XOFFSET_14
0x00028594 PA_CL_VPORT_YSCALE_14
0x00028598 PA_CL_VPORT_YOFFSET_14
0x0002859C PA_CL_VPORT_ZSCALE_14
0x000285A0 PA_CL_VPORT_ZOFFSET_14
0x000285A4 PA_CL_VPORT_XSCALE_15
0x000285A8 PA_CL_VPORT_XOFFSET_15
0x000285AC PA_CL_VPORT_YSCALE_15
0x000285B0 PA_CL_VPORT_YOFFSET_15
0x000285B4 PA_CL_VPORT_ZSCALE_15
0x000285B8 PA_CL_VPORT_ZOFFSET_15
0x000285BC PA_CL_UCP_0_X
0x000285C0 PA_CL_UCP_0_Y
0x000285C4 PA_CL_UCP_0_Z
0x000285C8 PA_CL_UCP_0_W
0x000285CC PA_CL_UCP_1_X
0x000285D0 PA_CL_UCP_1_Y
0x000285D4 PA_CL_UCP_1_Z
0x000285D8 PA_CL_UCP_1_W
0x000285DC PA_CL_UCP_2_X
0x000285E0 PA_CL_UCP_2_Y
0x000285E4 PA_CL_UCP_2_Z
0x000285E8 PA_CL_UCP_2_W
0x000285EC PA_CL_UCP_3_X
0x000285F0 PA_CL_UCP_3_Y
0x000285F4 PA_CL_UCP_3_Z
0x000285F8 PA_CL_UCP_3_W
0x000285FC PA_CL_UCP_4_X
0x00028600 PA_CL_UCP_4_Y
0x00028604 PA_CL_UCP_4_Z
0x00028608 PA_CL_UCP_4_W
0x0002860C PA_CL_UCP_5_X
0x00028610 PA_CL_UCP_5_Y
0x00028614 PA_CL_UCP_5_Z
0x00028618 PA_CL_UCP_5_W
0x0002861C SPI_VS_OUT_ID_0
0x00028620 SPI_VS_OUT_ID_1
0x00028624 SPI_VS_OUT_ID_2
0x00028628 SPI_VS_OUT_ID_3
0x0002862C SPI_VS_OUT_ID_4
0x00028630 SPI_VS_OUT_ID_5
0x00028634 SPI_VS_OUT_ID_6
0x00028638 SPI_VS_OUT_ID_7
0x0002863C SPI_VS_OUT_ID_8
0x00028640 SPI_VS_OUT_ID_9
0x00028644 SPI_PS_INPUT_CNTL_0
0x00028648 SPI_PS_INPUT_CNTL_1
0x0002864C SPI_PS_INPUT_CNTL_2
0x00028650 SPI_PS_INPUT_CNTL_3
0x00028654 SPI_PS_INPUT_CNTL_4
0x00028658 SPI_PS_INPUT_CNTL_5
0x0002865C SPI_PS_INPUT_CNTL_6
0x00028660 SPI_PS_INPUT_CNTL_7
0x00028664 SPI_PS_INPUT_CNTL_8
0x00028668 SPI_PS_INPUT_CNTL_9
0x0002866C SPI_PS_INPUT_CNTL_10
0x00028670 SPI_PS_INPUT_CNTL_11
0x00028674 SPI_PS_INPUT_CNTL_12
0x00028678 SPI_PS_INPUT_CNTL_13
0x0002867C SPI_PS_INPUT_CNTL_14
0x00028680 SPI_PS_INPUT_CNTL_15
0x00028684 SPI_PS_INPUT_CNTL_16
0x00028688 SPI_PS_INPUT_CNTL_17
0x0002868C SPI_PS_INPUT_CNTL_18
0x00028690 SPI_PS_INPUT_CNTL_19
0x00028694 SPI_PS_INPUT_CNTL_20
0x00028698 SPI_PS_INPUT_CNTL_21
0x0002869C SPI_PS_INPUT_CNTL_22
0x000286A0 SPI_PS_INPUT_CNTL_23
0x000286A4 SPI_PS_INPUT_CNTL_24
0x000286A8 SPI_PS_INPUT_CNTL_25
0x000286AC SPI_PS_INPUT_CNTL_26
0x000286B0 SPI_PS_INPUT_CNTL_27
0x000286B4 SPI_PS_INPUT_CNTL_28
0x000286B8 SPI_PS_INPUT_CNTL_29
0x000286BC SPI_PS_INPUT_CNTL_30
0x000286C0 SPI_PS_INPUT_CNTL_31
0x000286C4 SPI_VS_OUT_CONFIG
0x000286C8 SPI_THREAD_GROUPING
0x000286CC SPI_PS_IN_CONTROL_0
0x000286D0 SPI_PS_IN_CONTROL_1
0x000286D4 SPI_INTERP_CONTROL_0
0x000286D8 SPI_INPUT_Z
0x000286DC SPI_FOG_CNTL
0x000286E0 SPI_BARYC_CNTL
0x000286E4 SPI_PS_IN_CONTROL_2
0x000286E8 SPI_COMPUTE_INPUT_CNTL
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
0x000286F8 GDS_ADDR_SIZE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
0x0002878C CB_BLEND3_CONTROL
0x00028790 CB_BLEND4_CONTROL
0x00028794 CB_BLEND5_CONTROL
0x00028798 CB_BLEND6_CONTROL
0x0002879C CB_BLEND7_CONTROL
0x000287CC CS_COPY_STATE
0x000287D0 GFX_COPY_STATE
0x000287D4 PA_CL_POINT_X_RAD
0x000287D8 PA_CL_POINT_Y_RAD
0x000287DC PA_CL_POINT_SIZE
0x000287E0 PA_CL_POINT_CULL_RAD
0x00028808 CB_COLOR_CONTROL
0x0002880C DB_SHADER_CONTROL
0x00028810 PA_CL_CLIP_CNTL
0x00028814 PA_SU_SC_MODE_CNTL
0x00028818 PA_CL_VTE_CNTL
0x0002881C PA_CL_VS_OUT_CNTL
0x00028820 PA_CL_NANINF_CNTL
0x00028824 PA_SU_LINE_STIPPLE_CNTL
0x00028828 PA_SU_LINE_STIPPLE_SCALE
0x0002882C PA_SU_PRIM_FILTER_CNTL
0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
0x00028844 SQ_PGM_RESOURCES_PS
0x00028848 SQ_PGM_RESOURCES_2_PS
0x0002884C SQ_PGM_EXPORTS_PS
0x0002885C SQ_PGM_RESOURCES_VS
0x00028860 SQ_PGM_RESOURCES_2_VS
0x00028878 SQ_PGM_RESOURCES_GS
0x0002887C SQ_PGM_RESOURCES_2_GS
0x00028890 SQ_PGM_RESOURCES_ES
0x00028894 SQ_PGM_RESOURCES_2_ES
0x000288A8 SQ_PGM_RESOURCES_FS
0x000288BC SQ_PGM_RESOURCES_HS
0x000288C0 SQ_PGM_RESOURCES_2_HS
0x000288D0 SQ_PGM_RESOURCES_LS
0x000288D4 SQ_PGM_RESOURCES_2_LS
0x000288E8 SQ_LDS_ALLOC
0x000288EC SQ_LDS_ALLOC_PS
0x000288F0 SQ_VTX_SEMANTIC_CLEAR
0x00028A00 PA_SU_POINT_SIZE
0x00028A04 PA_SU_POINT_MINMAX
0x00028A08 PA_SU_LINE_CNTL
0x00028A0C PA_SC_LINE_STIPPLE
0x00028A10 VGT_OUTPUT_PATH_CNTL
0x00028A14 VGT_HOS_CNTL
0x00028A18 VGT_HOS_MAX_TESS_LEVEL
0x00028A1C VGT_HOS_MIN_TESS_LEVEL
0x00028A20 VGT_HOS_REUSE_DEPTH
0x00028A24 VGT_GROUP_PRIM_TYPE
0x00028A28 VGT_GROUP_FIRST_DECR
0x00028A2C VGT_GROUP_DECR
0x00028A30 VGT_GROUP_VECT_0_CNTL
0x00028A34 VGT_GROUP_VECT_1_CNTL
0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A48 PA_SC_MODE_CNTL_0
0x00028A4C PA_SC_MODE_CNTL_1
0x00028A50 VGT_ENHANCE
0x00028A54 VGT_GS_PER_ES
0x00028A58 VGT_ES_PER_GS
0x00028A5C VGT_GS_PER_VS
0x00028A6C VGT_GS_OUT_PRIM_TYPE
0x00028A84 VGT_PRIMITIVEID_EN
0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
0x00028AA0 VGT_INSTANCE_STEP_RATE_0
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
0x00028ABC DB_HTILE_SURFACE
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
0x00028B38 VGT_GS_MAX_VERT_OUT
0x00028B54 VGT_SHADER_STAGES_EN
0x00028B58 VGT_LS_HS_CONFIG
0x00028B5C VGT_LS_SIZE
0x00028B60 VGT_HS_SIZE
0x00028B64 VGT_LS_HS_ALLOC
0x00028B68 VGT_HS_PATCH_CONST
0x00028B6C VGT_TF_PARAM
0x00028B70 DB_ALPHA_TO_MASK
0x00028B74 VGT_DISPATCH_INITIATOR
0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
0x00028B7C PA_SU_POLY_OFFSET_CLAMP
0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028B74 VGT_GS_INSTANCE_CNT
0x00028C00 PA_SC_LINE_CNTL
0x00028C08 PA_SU_VTX_CNTL
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
0x00028C10 PA_CL_GB_VERT_DISC_ADJ
0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
0x00028C3C PA_SC_AA_MASK
0x00028C8C CB_COLOR0_CLEAR_WORD0
0x00028C90 CB_COLOR0_CLEAR_WORD1
0x00028C94 CB_COLOR0_CLEAR_WORD2
0x00028C98 CB_COLOR0_CLEAR_WORD3
0x00028CC8 CB_COLOR1_CLEAR_WORD0
0x00028CCC CB_COLOR1_CLEAR_WORD1
0x00028CD0 CB_COLOR1_CLEAR_WORD2
0x00028CD4 CB_COLOR1_CLEAR_WORD3
0x00028D04 CB_COLOR2_CLEAR_WORD0
0x00028D08 CB_COLOR2_CLEAR_WORD1
0x00028D0C CB_COLOR2_CLEAR_WORD2
0x00028D10 CB_COLOR2_CLEAR_WORD3
0x00028D40 CB_COLOR3_CLEAR_WORD0
0x00028D44 CB_COLOR3_CLEAR_WORD1
0x00028D48 CB_COLOR3_CLEAR_WORD2
0x00028D4C CB_COLOR3_CLEAR_WORD3
0x00028D7C CB_COLOR4_CLEAR_WORD0
0x00028D80 CB_COLOR4_CLEAR_WORD1
0x00028D84 CB_COLOR4_CLEAR_WORD2
0x00028D88 CB_COLOR4_CLEAR_WORD3
0x00028DB8 CB_COLOR5_CLEAR_WORD0
0x00028DBC CB_COLOR5_CLEAR_WORD1
0x00028DC0 CB_COLOR5_CLEAR_WORD2
0x00028DC4 CB_COLOR5_CLEAR_WORD3
0x00028DF4 CB_COLOR6_CLEAR_WORD0
0x00028DF8 CB_COLOR6_CLEAR_WORD1
0x00028DFC CB_COLOR6_CLEAR_WORD2
0x00028E00 CB_COLOR6_CLEAR_WORD3
0x00028E30 CB_COLOR7_CLEAR_WORD0
0x00028E34 CB_COLOR7_CLEAR_WORD1
0x00028E38 CB_COLOR7_CLEAR_WORD2
0x00028E3C CB_COLOR7_CLEAR_WORD3
0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
0x0003CFF0 SQ_VTX_BASE_VTX_LOC
0x0003CFF4 SQ_VTX_START_INST_LOC
0x0003FF00 SQ_TEX_SAMPLER_CLEAR
0x0003FF04 SQ_TEX_RESOURCE_CLEAR
0x0003FF08 SQ_LOOP_BOOL_CLEAR
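Note: the table above reads like a whitelist of register offsets that the command-stream checker may let userspace program directly. Below is a minimal, self-contained sketch of how such a list could be consulted; it is an illustration under stated assumptions only. The in-tree checker actually compiles a list like this into a bitmap at build time, so the function name, the sorted-array representation, and the "reject anything unlisted" policy here are assumptions, not the driver's exact code.

/*
 * Illustrative sketch only (assumption): validate a register offset
 * from a command buffer against a sorted copy of the offsets listed
 * above. The real checker uses a generated bitmap instead, but the
 * intent -- flag writes to registers that are not on the list -- is
 * the same.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool evergreen_reg_is_listed(const uint32_t *sorted_regs, size_t count,
				    uint32_t reg)
{
	size_t lo = 0, hi = count;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (sorted_regs[mid] == reg)
			return true;
		if (sorted_regs[mid] < reg)
			lo = mid + 1;
		else
			hi = mid;
	}
	return false;	/* unlisted: needs special handling or is rejected */
}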

View File

@ -74,7 +74,8 @@ void rs600_pm_misc(struct radeon_device *rdev)
if (voltage->delay)
udelay(voltage->delay);
}
}
} else if (voltage->type == VOLTAGE_VDDC)
radeon_atom_set_voltage(rdev, voltage->vddc_id);
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);

View File

@ -44,7 +44,12 @@ void rv770_fini(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
radeon_atom_set_voltage(rdev, voltage->voltage);
}
/*

View File

@ -77,7 +77,7 @@ struct ttm_page_pool {
/**
* Limits for the pool. They are handled without locks because only place where
* they may change is in sysfs store. They won't have immediate effect anyway
* so forcing serialiazation to access them is pointless.
* so forcing serialization to access them is pointless.
*/
struct ttm_pool_opts {
@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
printk(KERN_ERR "[ttm] Setting allocation size to %lu "
"is not allowed. Recomended size is "
"%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
printk(KERN_ERR TTM_PFX
"Setting allocation size to %lu "
"is not allowed. Recommended size is "
"%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
printk(KERN_WARNING "[ttm] Setting allocation size to "
"larger than %lu is not recomended.\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
printk(KERN_WARNING TTM_PFX
"Setting allocation size to "
"larger than %lu is not recommended.\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
printk(KERN_ERR TTM_PFX
"Failed to allocate memory for pool free operation.\n");
return 0;
}
@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
}
/**
* Calback for mm to request pool to reduce number of page held.
* Callback for mm to request pool to reduce number of page held.
*/
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
cpages);
printk(KERN_ERR TTM_PFX
"Failed to set %d pages to uc!\n",
cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
cpages);
printk(KERN_ERR TTM_PFX
"Failed to set %d pages to wc!\n",
cpages);
break;
default:
break;
@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
struct page **failed_pages, unsigned cpages)
{
unsigned i;
/* Failed pages has to be reed */
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
list_del(&failed_pages[i]->lru);
__free_page(failed_pages[i]);
@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
printk(KERN_ERR TTM_PFX
"Unable to allocate table for new pages.");
return -ENOMEM;
}
@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
p = alloc_page(gfp_flags);
if (!p) {
printk(KERN_ERR "[ttm] unable to get page %u\n", i);
printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
r = ttm_set_pages_caching(caching_array, cstate, cpages);
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
printk(KERN_ERR TTM_PFX
"Failed to fill pool (%p).", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
@ -671,13 +679,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
gfp_flags |= __GFP_HIGHMEM;
gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
printk(KERN_ERR "[ttm] unable to allocate page.");
printk(KERN_ERR TTM_PFX
"Unable to allocate page.");
return -ENOMEM;
}
@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
if (r) {
/* If there is any pages in the list put them back to
* the pool. */
printk(KERN_ERR "[ttm] Failed to allocate extra pages "
"for large request.");
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
ttm_put_pages(pages, 0, flags, cstate);
return r;
}
@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
return 0;
printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
return;
printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
ttm_pool_mm_shrink_fini(&_manager);
for (i = 0; i < NUM_POOLS; ++i)
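Note: the ttm_page_alloc.c hunks above replace ad-hoc "[ttm]" strings with a TTM_PFX message prefix. The macro's definition is not part of this diff, so the following is an assumption for illustration: a single prefix literal concatenated into every printk format, which keeps the log tag consistent and easy to grep.

/*
 * Sketch only; the real TTM_PFX lives in the TTM headers and its exact
 * spelling ("[TTM] ") is assumed here.
 */
#include <linux/kernel.h>

#define TTM_PFX "[TTM] "

static inline void ttm_example_warn_pages(int npages)
{
	/* Adjacent string literals concatenate, so every message gets one prefix. */
	printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", npages);
}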

View File

@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o
vmwgfx_overlay.o vmwgfx_fence.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o

View File

@ -88,6 +88,9 @@
#define DRM_IOCTL_VMW_FENCE_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
struct drm_vmw_update_layout_arg)
/**
@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
DRM_AUTH | DRM_UNLOCKED)
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};
static struct pci_device_id vmw_pci_id_list[] = {
@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err3;
}
/* Need mmio memory to check for fifo pitchlock cap. */
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
!(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n");
goto out_err4;
}
dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12);
@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
unregister_pm_notifier(&dev_priv->pm_nb);
vmw_fb_close(dev_priv);
@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev,
{
struct vmw_master *vmaster;
DRM_INFO("Master create.\n");
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
if (unlikely(vmaster == NULL))
return -ENOMEM;
@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev,
{
struct vmw_master *vmaster = vmw_master(master);
DRM_INFO("Master destroy.\n");
master->driver_priv = NULL;
kfree(vmaster);
}
@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
DRM_INFO("Master set.\n");
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
DRM_INFO("Master drop.\n");
/**
* Make sure the master doesn't disappear while we have
* it locked.

View File

@ -41,12 +41,13 @@
#define VMWGFX_DRIVER_DATE "20100209"
#define VMWGFX_DRIVER_MAJOR 1
#define VMWGFX_DRIVER_MINOR 0
#define VMWGFX_DRIVER_MINOR 2
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048
#define VMWGFX_MAX_DISPLAYS 16
struct vmw_fpriv {
struct drm_master *locked_master;
@ -102,6 +103,13 @@ struct vmw_surface {
struct vmw_cursor_snooper snooper;
};
struct vmw_fence_queue {
struct list_head head;
struct timespec lag;
struct timespec lag_time;
spinlock_t lock;
};
struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
@ -115,6 +123,7 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
struct vmw_fence_queue fence_queue;
};
struct vmw_relocation {
@ -144,6 +153,14 @@ struct vmw_master {
struct ttm_lock lock;
};
struct vmw_vga_topology_state {
uint32_t width;
uint32_t height;
uint32_t primary;
uint32_t pos_x;
uint32_t pos_y;
};
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@ -171,14 +188,19 @@ struct vmw_private {
* VGA registers.
*/
struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
uint32_t vga_width;
uint32_t vga_height;
uint32_t vga_depth;
uint32_t vga_bpp;
uint32_t vga_pseudo;
uint32_t vga_red_mask;
uint32_t vga_blue_mask;
uint32_t vga_green_mask;
uint32_t vga_blue_mask;
uint32_t vga_bpl;
uint32_t vga_pitchlock;
uint32_t num_displays;
/*
* Framebuffer info.
@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
/**
* TTM glue - vmwgfx_ttm_glue.c
@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t sequence,
bool interruptible,
unsigned long timeout);
extern void vmw_update_sequence(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state);
/**
* Rudimentary fence objects currently used only for throttling -
* vmwgfx_fence.c
*/
extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
extern int vmw_fence_push(struct vmw_fence_queue *queue,
uint32_t sequence);
extern int vmw_fence_pull(struct vmw_fence_queue *queue,
uint32_t signaled_sequence);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
struct vmw_fence_queue *queue, uint32_t us);
/**
* Kernel framebuffer - vmwgfx_fb.c
@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
void vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bbp, unsigned depth);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/**
* Overlay control - vmwgfx_overlay.c

View File

@ -669,6 +669,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
goto out_err;
vmw_apply_relocations(sw_context);
if (arg->throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
arg->throttle_us);
if (unlikely(ret != 0))
goto out_err;
}
vmw_fifo_commit(dev_priv, arg->command_size);
ret = vmw_fifo_send_fence(dev_priv, &sequence);

View File

@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
/* without multimon its hard to resize */
if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
(var->xres != par->max_width ||
var->yres != par->max_height)) {
DRM_ERROR("Tried to resize, but we don't have multimon\n");
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
(var->xoffset != 0 || var->yoffset != 0)) {
DRM_ERROR("Can not handle panning without display topology\n");
return -EINVAL;
}
if (var->xres > par->max_width ||
var->yres > par->max_height) {
if ((var->xoffset + var->xres) > par->max_width ||
(var->yoffset + var->yres) > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
}
@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info)
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
info->fix.line_length,
par->bpp, par->depth);
if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
/* TODO check if pitch and offset changes */
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
} else {
vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
/* TODO check if pitch and offset changes */
}
/* This is really helpful since if this fails the user
* can probably not see anything on the screen.
*/
WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
return 0;
}
@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
/* XXX These shouldn't be hardcoded. */
initial_width = 800;
initial_height = 600;
fb_bbp = 32;
fb_depth = 24;
if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
} else {
fb_width = min(vmw_priv->fb_max_width, initial_width);
fb_height = min(vmw_priv->fb_max_height, initial_height);
}
/* XXX As shouldn't these be as well. */
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
initial_width = min(fb_width, initial_width);
initial_height = min(fb_height, initial_height);
vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
fb_pitch = fb_width * fb_bbp / 8;
fb_size = fb_pitch * fb_height;
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
DRM_DEBUG("fb_pitch %u\n", fb_pitch);
DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
@ -659,6 +616,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
goto err_unlock;
ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
/* Could probably bug on */
WARN_ON(bo->offset != 0);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);

View File

@ -0,0 +1,173 @@
/**************************************************************************
*
* Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
struct vmw_fence {
struct list_head head;
uint32_t sequence;
struct timespec submitted;
};
void vmw_fence_queue_init(struct vmw_fence_queue *queue)
{
INIT_LIST_HEAD(&queue->head);
queue->lag = ns_to_timespec(0);
getrawmonotonic(&queue->lag_time);
spin_lock_init(&queue->lock);
}
void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
{
struct vmw_fence *fence, *next;
spin_lock(&queue->lock);
list_for_each_entry_safe(fence, next, &queue->head, head) {
kfree(fence);
}
spin_unlock(&queue->lock);
}
int vmw_fence_push(struct vmw_fence_queue *queue,
uint32_t sequence)
{
struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(!fence))
return -ENOMEM;
fence->sequence = sequence;
getrawmonotonic(&fence->submitted);
spin_lock(&queue->lock);
list_add_tail(&fence->head, &queue->head);
spin_unlock(&queue->lock);
return 0;
}
int vmw_fence_pull(struct vmw_fence_queue *queue,
uint32_t signaled_sequence)
{
struct vmw_fence *fence, *next;
struct timespec now;
bool updated = false;
spin_lock(&queue->lock);
getrawmonotonic(&now);
if (list_empty(&queue->head)) {
queue->lag = ns_to_timespec(0);
queue->lag_time = now;
updated = true;
goto out_unlock;
}
list_for_each_entry_safe(fence, next, &queue->head, head) {
if (signaled_sequence - fence->sequence > (1 << 30))
continue;
queue->lag = timespec_sub(now, fence->submitted);
queue->lag_time = now;
updated = true;
list_del(&fence->head);
kfree(fence);
}
out_unlock:
spin_unlock(&queue->lock);
return (updated) ? 0 : -EBUSY;
}
static struct timespec vmw_timespec_add(struct timespec t1,
struct timespec t2)
{
t1.tv_sec += t2.tv_sec;
t1.tv_nsec += t2.tv_nsec;
if (t1.tv_nsec >= 1000000000L) {
t1.tv_sec += 1;
t1.tv_nsec -= 1000000000L;
}
return t1;
}
static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
{
struct timespec now;
spin_lock(&queue->lock);
getrawmonotonic(&now);
queue->lag = vmw_timespec_add(queue->lag,
timespec_sub(now, queue->lag_time));
queue->lag_time = now;
spin_unlock(&queue->lock);
return queue->lag;
}
static bool vmw_lag_lt(struct vmw_fence_queue *queue,
uint32_t us)
{
struct timespec lag, cond;
cond = ns_to_timespec((s64) us * 1000);
lag = vmw_fifo_lag(queue);
return (timespec_compare(&lag, &cond) < 1);
}
int vmw_wait_lag(struct vmw_private *dev_priv,
struct vmw_fence_queue *queue, uint32_t us)
{
struct vmw_fence *fence;
uint32_t sequence;
int ret;
while (!vmw_lag_lt(queue, us)) {
spin_lock(&queue->lock);
if (list_empty(&queue->head))
sequence = atomic_read(&dev_priv->fence_seq);
else {
fence = list_first_entry(&queue->head,
struct vmw_fence, head);
sequence = fence->sequence;
}
spin_unlock(&queue->lock);
ret = vmw_wait_fence(dev_priv, false, sequence, true,
3*HZ);
if (unlikely(ret != 0))
return ret;
(void) vmw_fence_pull(queue, sequence);
}
return 0;
}
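Note: taken together with the execbuf, fifo and irq hunks elsewhere in this section, the new fence queue implements a simple command-lag throttle: each emitted fence is pushed with its submission time, signaled fences are pulled as the hardware catches up, and vmw_wait_lag() blocks a submitter until the outstanding lag drops below its budget. A compressed sketch of that flow follows; the surrounding driver plumbing, error handling and FIFO locking are assumed/omitted.

#include "vmwgfx_drv.h"	/* as in the new file above */

/* Sketch (assumption): how a submission path is expected to drive the queue. */
static int example_throttled_submit(struct vmw_private *dev_priv,
				    uint32_t throttle_us, uint32_t bytes)
{
	struct vmw_fence_queue *queue = &dev_priv->fifo.fence_queue;
	uint32_t sequence;
	int ret;

	/* Wait until the accumulated lag is below the caller's budget. */
	ret = vmw_wait_lag(dev_priv, queue, throttle_us);
	if (unlikely(ret != 0))
		return ret;

	vmw_fifo_commit(dev_priv, bytes);

	/* vmw_fifo_send_fence() pushes the sequence onto the queue; the
	 * interrupt/idle path later calls vmw_update_sequence(), which
	 * pulls signaled entries back off and updates the lag. */
	return vmw_fifo_send_fence(dev_priv, &sequence);
}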

View File

@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
return true;
}
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t caps;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
return true;
return false;
}
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
vmw_fence_queue_init(&fifo->fence_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
vfree(fifo->static_buffer);
@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
dev_priv->enable_state);
mutex_unlock(&dev_priv->hw_mutex);
vmw_fence_queue_takedown(&fifo->fence_queue);
if (likely(fifo->last_buffer != NULL)) {
vfree(fifo->last_buffer);
@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
fifo_state->last_buffer_add = true;
vmw_fifo_commit(dev_priv, bytes);
fifo_state->last_buffer_add = false;
(void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
vmw_update_sequence(dev_priv, fifo_state);
out_err:
return ret;

View File

@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
return (busy == 0);
}
void vmw_update_sequence(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (dev_priv->last_read_sequence != sequence) {
dev_priv->last_read_sequence = sequence;
vmw_fence_pull(&fifo_state->fence_queue, sequence);
}
}
bool vmw_fence_signaled(struct vmw_private *dev_priv,
uint32_t sequence)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
struct vmw_fifo_state *fifo_state;
bool ret;
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
fifo_state = &dev_priv->fifo;
vmw_update_sequence(dev_priv, fifo_state);
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
fifo_state = &dev_priv->fifo;
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
vmw_fifo_idle(dev_priv, sequence))
return true;

View File

@ -30,6 +30,8 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_dma_buffer *buffer;
struct delayed_work d_work;
struct mutex work_lock;
bool present_fs;
@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
vfbs->base.base.depth = 24;
vfbs->base.base.width = width;
vfbs->base.base.height = height;
vfbs->base.pin = NULL;
vfbs->base.unpin = NULL;
vfbs->base.pin = &vmw_surface_dmabuf_pin;
vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
vfbs->surface = surface;
mutex_init(&vfbs->work_lock);
INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
.create_handle = vmw_framebuffer_create_handle,
};
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(&vfb->base);
unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
int ret;
vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
if (unlikely(vfbs->buffer == NULL))
return -ENOMEM;
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
vmw_overlay_resume_all(dev_priv);
return ret;
}
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
struct ttm_buffer_object *bo;
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(&vfb->base);
bo = &vfbs->buffer->base;
ttm_bo_unref(&bo);
vfbs->buffer = NULL;
return 0;
}
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
vmw_framebuffer_to_vfbd(&vfb->base);
int ret;
vmw_overlay_pause_all(dev_priv);
ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
} else
WARN_ON(true);
vmw_overlay_resume_all(dev_priv);
WARN_ON(ret != 0);
return 0;
}
@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
/* XXX get the first 3 from the surface info */
vfbd->base.base.bits_per_pixel = 32;
vfbd->base.base.pitch = width * 32 / 4;
vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
vfbd->base.base.depth = 24;
vfbd->base.base.width = width;
vfbd->base.base.height = height;
@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
dev->mode_config.max_width = dev_priv->fb_max_width;
dev->mode_config.max_height = dev_priv->fb_max_height;
/* assumed largest fb size */
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
ret = vmw_kms_init_legacy_display_system(dev_priv);
@ -826,49 +846,140 @@ out:
return ret;
}
void vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bbp, unsigned depth)
{
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
else if (vmw_fifo_have_pitchlock(vmw_priv))
iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
/*
* setup a single multimon monitor with the size
* of 0x0, this stops the UI from resizing when we
* change the framebuffer size
*/
if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}
struct vmw_vga_topology_state *save;
uint32_t i;
vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_priv->vga_pitchlock =
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
else if (vmw_fifo_have_pitchlock(vmw_priv))
vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
SVGA_FIFO_PITCHLOCK);
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
return 0;
vmw_priv->num_displays = vmw_read(vmw_priv,
SVGA_REG_NUM_GUEST_DISPLAYS);
for (i = 0; i < vmw_priv->num_displays; ++i) {
save = &vmw_priv->vga_save[i];
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}
return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
struct vmw_vga_topology_state *save;
uint32_t i;
vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
vmw_priv->vga_pitchlock);
else if (vmw_fifo_have_pitchlock(vmw_priv))
iowrite32(vmw_priv->vga_pitchlock,
vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
/* TODO check for multimon */
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
return 0;
for (i = 0; i < vmw_priv->num_displays; ++i) {
save = &vmw_priv->vga_save[i];
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}
return 0;
}
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
struct vmw_master *vmaster = vmw_master(file_priv->master);
void __user *user_rects;
struct drm_vmw_rect *rects;
unsigned rects_size;
int ret;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
goto out_unlock;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
rects = kzalloc(rects_size, GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
}
user_rects = (void __user *)(unsigned long)arg->rects;
ret = copy_from_user(rects, user_rects, rects_size);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to get rects.\n");
goto out_free;
}
vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
out_free:
kfree(rects);
out_unlock:
ttm_read_unlock(&vmaster->lock);
return ret;
}
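Note: the ioctl above (exposed as DRM_IOCTL_VMW_UPDATE_LAYOUT and callable by a DRM master or control node) lets a privileged client replace the preferred display layout. A rough userspace sketch is below, assuming the usual open()/ioctl() plumbing and that the updated vmwgfx_drm.h provides the drm_vmw_update_layout_arg and drm_vmw_rect definitions; any header details beyond the fields shown in the hunks are assumptions.

/* Hypothetical userspace sketch: program a side-by-side two-output layout. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "vmwgfx_drm.h"	/* assumed to provide the ioctl and arg structs */

static int example_update_layout(int drm_fd)
{
	struct drm_vmw_rect rects[2] = {
		{ .x = 0,    .y = 0, .w = 1280, .h = 800 },
		{ .x = 1280, .y = 0, .w = 1024, .h = 768 },
	};
	struct drm_vmw_update_layout_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.num_outputs = 2;
	arg.rects = (uint64_t)(unsigned long)rects;	/* user pointer passed as u64 */

	return ioctl(drm_fd, DRM_IOCTL_VMW_UPDATE_LAYOUT, &arg);
}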

View File

@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
/*
* Legacy display unit functions - vmwgfx_ldu.h
* Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects);
#endif

View File

@ -38,6 +38,7 @@ struct vmw_legacy_display {
struct list_head active;
unsigned num_active;
unsigned last_num_active;
struct vmw_framebuffer *fb;
};
@ -48,9 +49,12 @@ struct vmw_legacy_display {
struct vmw_legacy_display_unit {
struct vmw_display_unit base;
struct list_head active;
unsigned pref_width;
unsigned pref_height;
bool pref_active;
struct drm_display_mode *pref_mode;
unsigned unit;
struct list_head active;
};
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
struct drm_crtc *crtc;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
int i = 0;
/* to stop the screen from changing size on resize */
vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
for (i = 0; i < lds->num_active; i++) {
vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
*/
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
int w = 0, h = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
w = max(w, crtc->x + crtc->mode.hdisplay);
h = max(h, crtc->y + crtc->mode.vdisplay);
i++;
}
if (crtc == NULL)
return 0;
fb = entry->base.crtc.fb;
vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
fb->bits_per_pixel, fb->depth);
return 0;
}
/* Now set the mode */
vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
if (!list_empty(&lds->active)) {
entry = list_entry(lds->active.next, typeof(*entry), active);
fb = entry->base.crtc.fb;
vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
fb->bits_per_pixel, fb->depth);
}
/* Make sure we always show something. */
vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
lds->num_active ? lds->num_active : 1);
i = 0;
list_for_each_entry(entry, &lds->active, active) {
crtc = &entry->base.crtc;
@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
i++;
}
BUG_ON(i != lds->num_active);
lds->last_num_active = lds->num_active;
return 0;
}
@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
if (list_empty(&ldu->active))
return 0;
/* Must init otherwise list_empty(&ldu->active) will not work. */
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *entry;
struct list_head *at;
BUG_ON(!ld->num_active && ld->fb);
if (vfb != ld->fb) {
if (ld->fb && ld->fb->unpin)
ld->fb->unpin(ld->fb);
if (vfb->pin)
vfb->pin(vfb);
ld->fb = vfb;
}
if (!list_empty(&ldu->active))
return 0;
at = &ld->active;
list_for_each_entry(entry, &ld->active, active) {
if (entry->unit > ldu->unit)
if (entry->base.unit > ldu->base.unit)
break;
at = &entry->active;
}
list_add(&ldu->active, at);
if (ld->num_active++ == 0) {
BUG_ON(ld->fb);
if (vfb->pin)
vfb->pin(vfb);
ld->fb = vfb;
}
ld->num_active++;
return 0;
}
@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
/* ldu only supports one fb active at the time */
if (dev_priv->ldu_priv->fb && vfb &&
!(dev_priv->ldu_priv->num_active == 1 &&
!list_empty(&ldu->active)) &&
dev_priv->ldu_priv->fb != vfb) {
DRM_ERROR("Multiple framebuffers not supported\n");
return -EINVAL;
@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
static enum drm_connector_status
vmw_ldu_connector_detect(struct drm_connector *connector)
{
/* XXX vmwctrl should control connection status */
if (vmw_connector_to_ldu(connector)->base.unit == 0)
if (vmw_connector_to_ldu(connector)->pref_active)
return connector_status_connected;
return connector_status_disconnected;
}
@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@60Hz */
{ DRM_MODE("800x600",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
uint32_t max_width, uint32_t max_height)
{
struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
/* Add preferred mode */
{
mode = drm_mode_duplicate(dev, &prefmode);
if (!mode)
return 0;
mode->hdisplay = ldu->pref_width;
mode->vdisplay = ldu->pref_height;
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_probed_add(connector, mode);
if (ldu->pref_mode) {
list_del_init(&ldu->pref_mode->head);
drm_mode_destroy(dev, ldu->pref_mode);
}
ldu->pref_mode = mode;
}
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
vmw_ldu_connector_builtin[i].vdisplay > max_height)
@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
if (!ldu)
return -ENOMEM;
ldu->unit = unit;
ldu->base.unit = unit;
crtc = &ldu->base.crtc;
encoder = &ldu->base.encoder;
connector = &ldu->base.connector;
INIT_LIST_HEAD(&ldu->active);
ldu->pref_active = (unit == 0);
ldu->pref_width = 800;
ldu->pref_height = 600;
ldu->pref_mode = NULL;
drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
/* Initial status */
if (unit == 0)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
connector->status = vmw_ldu_connector_detect(connector);
drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
INIT_LIST_HEAD(&ldu->active);
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
drm_connector_attach_property(connector,
@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
dev_priv->ldu_priv->num_active = 0;
dev_priv->ldu_priv->last_num_active = 0;
dev_priv->ldu_priv->fb = NULL;
drm_mode_create_dirty_info_property(dev_priv->dev);
vmw_ldu_init(dev_priv, 0);
vmw_ldu_init(dev_priv, 1);
vmw_ldu_init(dev_priv, 2);
vmw_ldu_init(dev_priv, 3);
vmw_ldu_init(dev_priv, 4);
vmw_ldu_init(dev_priv, 5);
vmw_ldu_init(dev_priv, 6);
vmw_ldu_init(dev_priv, 7);
/* for old hardware without multimon only enable one display */
if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
vmw_ldu_init(dev_priv, 1);
vmw_ldu_init(dev_priv, 2);
vmw_ldu_init(dev_priv, 3);
vmw_ldu_init(dev_priv, 4);
vmw_ldu_init(dev_priv, 5);
vmw_ldu_init(dev_priv, 6);
vmw_ldu_init(dev_priv, 7);
}
return 0;
}
@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
return 0;
}
int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
struct drm_vmw_rect *rects)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_legacy_display_unit *ldu;
struct drm_connector *con;
int i;
mutex_lock(&dev->mode_config.mutex);
#if 0
DRM_INFO("%s: new layout ", __func__);
for (i = 0; i < (int)num; i++)
DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
rects[i].w, rects[i].h);
DRM_INFO("\n");
#else
(void)i;
#endif
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
ldu = vmw_connector_to_ldu(con);
if (num > ldu->base.unit) {
ldu->pref_width = rects[ldu->base.unit].w;
ldu->pref_height = rects[ldu->base.unit].h;
ldu->pref_active = true;
} else {
ldu->pref_width = 800;
ldu->pref_height = 600;
ldu->pref_active = false;
}
con->status = vmw_ldu_connector_detect(con);
}
mutex_unlock(&dev->mode_config.mutex);
return 0;
}

View File

@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
if (stream->buf != buf)
stream->buf = vmw_dmabuf_reference(buf);
stream->saved = *arg;
/* stream is no longer stopped/paused */
stream->paused = false;
return 0;
}

View File

@ -1,12 +1,32 @@
/*
* vgaarb.c
* vgaarb.c: Implements the VGA arbitration. For details refer to
* Documentation/vgaarbiter.txt
*
*
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
*
* Implements the VGA arbitration. For details refer to
* Documentation/vgaarbiter.txt
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/module.h>
@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
(vgadev->decodes & VGA_RSRC_LEGACY_MEM))
rsrc |= VGA_RSRC_LEGACY_MEM;
pr_devel("%s: %d\n", __func__, rsrc);
pr_devel("%s: owns: %d\n", __func__, vgadev->owns);
pr_debug("%s: %d\n", __func__, rsrc);
pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
/* Check what resources we need to acquire */
wants = rsrc & ~vgadev->owns;
@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
{
unsigned int old_locks = vgadev->locks;
pr_devel("%s\n", __func__);
pr_debug("%s\n", __func__);
/* Update our counters, and account for equivalent legacy resources
* if we decode them
@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
else
vga_decode_count--;
}
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 5;
remaining -= 5;
pr_devel("client 0x%p called 'lock'\n", priv);
pr_debug("client 0x%p called 'lock'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 7;
remaining -= 7;
pr_devel("client 0x%p called 'unlock'\n", priv);
pr_debug("client 0x%p called 'unlock'\n", priv);
if (strncmp(curr_pos, "all", 3) == 0)
io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 8;
remaining -= 8;
pr_devel("client 0x%p called 'trylock'\n", priv);
pr_debug("client 0x%p called 'trylock'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
curr_pos += 7;
remaining -= 7;
pr_devel("client 0x%p called 'target'\n", priv);
pr_debug("client 0x%p called 'target'\n", priv);
/* if target is default */
if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
ret_val = -EPROTO;
goto done;
}
pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
pbus = pci_find_bus(domain, bus);
pr_devel("vgaarb: pbus %p\n", pbus);
pr_debug("vgaarb: pbus %p\n", pbus);
if (pbus == NULL) {
pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
domain, bus);
@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
goto done;
}
pdev = pci_get_slot(pbus, devfn);
pr_devel("vgaarb: pdev %p\n", pdev);
pr_debug("vgaarb: pdev %p\n", pdev);
if (!pdev) {
pr_err("vgaarb: invalid PCI address %x:%x\n",
bus, devfn);
@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
}
vgadev = vgadev_find(pdev);
pr_devel("vgaarb: vgadev %p\n", vgadev);
pr_debug("vgaarb: vgadev %p\n", vgadev);
if (vgadev == NULL) {
pr_err("vgaarb: this pci device is not a vga device\n");
pci_dev_put(pdev);
@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
curr_pos += 8;
remaining -= 8;
pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv);
pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
{
struct vga_arb_private *priv = file->private_data;
pr_devel("%s\n", __func__);
pr_debug("%s\n", __func__);
if (priv == NULL)
return -ENODEV;
@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file)
struct vga_arb_private *priv;
unsigned long flags;
pr_devel("%s\n", __func__);
pr_debug("%s\n", __func__);
priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
if (priv == NULL)
@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
unsigned long flags;
int i;
pr_devel("%s\n", __func__);
pr_debug("%s\n", __func__);
if (priv == NULL)
return -ENODEV;
@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
uc = &priv->cards[i];
if (uc->pdev == NULL)
continue;
pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n",
pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
uc->io_cnt, uc->mem_cnt);
while (uc->io_cnt--)
vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
struct pci_dev *pdev = to_pci_dev(dev);
bool notify = false;
pr_devel("%s\n", __func__);
pr_debug("%s\n", __func__);
/* For now we're only interested in devices added and removed. I didn't
* test this thing here, so someone needs to double check for the

View File

@ -130,4 +130,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev);
extern void drm_kms_helper_poll_init(struct drm_device *dev);
extern void drm_kms_helper_poll_fini(struct drm_device *dev);
extern void drm_helper_hpd_irq_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
#endif
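
The two poll helpers declared above let a driver temporarily suspend output polling while its hardware cannot be probed. A minimal, hypothetical sketch of how a driver might bracket a power transition with them; foo_power_off()/foo_power_on() are placeholder hooks, not part of this patch:

#include "drmP.h"
#include "drm_crtc_helper.h"

/*
 * Hypothetical usage sketch (not from this patch): stop connector
 * polling while the device is powered down, restart it afterwards.
 */
static void foo_set_power(struct drm_device *dev, bool power_on)
{
	if (!power_on) {
		drm_kms_helper_poll_disable(dev);
		foo_power_off(dev);
	} else {
		foo_power_on(dev);
		drm_kms_helper_poll_enable(dev);
	}
}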

View File

@ -79,6 +79,7 @@ struct drm_nouveau_gpuobj_free {
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;

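For context, a hedged userspace sketch of querying the new PTIMER parameter through the getparam interface; it assumes libdrm's drmCommandWriteRead() and the pre-existing DRM_NOUVEAU_GETPARAM command index, neither of which is introduced by this patch:

#include <stdint.h>
#include <xf86drm.h>
#include "nouveau_drm.h"

/* Read the GPU's PTIMER time; returns 0 on success, negative errno otherwise. */
static int nouveau_get_ptimer(int fd, uint64_t *time_out)
{
	struct drm_nouveau_getparam gp = {
		.param = NOUVEAU_GETPARAM_PTIMER_TIME,
	};
	int ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM,
				      &gp, sizeof(gp));
	if (ret == 0)
		*time_out = gp.value;
	return ret;
}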
View File

@ -50,6 +50,8 @@
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_FIFO_DEBUG 13
#define DRM_VMW_FENCE_WAIT 14
/* guarded by minor version >= 2 */
#define DRM_VMW_UPDATE_LAYOUT 15
/*************************************************************************/
@ -585,4 +587,28 @@ struct drm_vmw_stream_arg {
* sure that the stream has been stopped.
*/
/*************************************************************************/
/**
* DRM_VMW_UPDATE_LAYOUT - Update layout
*
* Updates the preferred modes and connection status for connectors. The
* command consists of one drm_vmw_update_layout_arg pointing out an array
* of num_outputs drm_vmw_rect's.
*/
/**
* struct drm_vmw_update_layout_arg
*
* @num_outputs: number of active outputs
* @rects: pointer to array of drm_vmw_rect
*
* Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
*/
struct drm_vmw_update_layout_arg {
uint32_t num_outputs;
uint32_t pad64;
uint64_t rects;
};
#endif
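
For context, a hedged userspace sketch of invoking the new ioctl; it assumes libdrm's drmCommandWrite() and an already-open vmwgfx file descriptor, and a real client would first verify that the driver minor version is >= 2, as noted by the guard comment above:

#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

/*
 * Hypothetical client sketch (not from this patch): request a
 * side-by-side two-output layout from the vmwgfx driver.
 */
static int vmw_set_two_head_layout(int fd)
{
	struct drm_vmw_rect rects[2] = {
		{ .x = 0,    .y = 0, .w = 1280, .h = 800 },
		{ .x = 1280, .y = 0, .w = 1280, .h = 800 },
	};
	struct drm_vmw_update_layout_arg arg = {
		.num_outputs = 2,
		/* user pointer packed into the u64 rects field */
		.rects = (uint64_t)(uintptr_t)rects,
	};

	return drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
}

On the kernel side, vmw_kms_ldu_update_layout() above then marks the first num_outputs connectors active with the requested preferred sizes and resets the rest to the 800x600 default.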

View File

@ -5,6 +5,27 @@
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#ifndef LINUX_VGA_H