diff --git a/MAINTAINERS b/MAINTAINERS
index bbcaf31657ae..debde0128cd0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2060,6 +2060,15 @@ S:	Maintained
 F:	drivers/gpu/drm/
 F:	include/drm/
 
+INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
+M:	Chris Wilson <chris@chris-wilson.co.uk>
+L:	intel-gfx@lists.freedesktop.org
+L:	dri-devel@lists.freedesktop.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
+S:	Supported
+F:	drivers/gpu/drm/i915
+F:	include/drm/i915*
+
 DSCC4 DRIVER
 M:	Francois Romieu
 L:	netdev@vger.kernel.org
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 627f542827c7..8eb56e273e75 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
 obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
 obj-$(CONFIG_AGP_I460)		+= i460-agp.o
 obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
+obj-$(CONFIG_AGP_INTEL)		+= intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
 obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
 obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 120490949997..5259065f3c79 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -121,11 +121,6 @@ struct agp_bridge_driver {
 	void (*agp_destroy_pages)(struct agp_memory *);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
 	void (*chipset_flush)(struct agp_bridge_data *);
-
-	int (*agp_map_page)(struct page *page, dma_addr_t *ret);
-	void (*agp_unmap_page)(struct page *page, dma_addr_t dma);
-	int (*agp_map_memory)(struct agp_memory *mem);
-	void (*agp_unmap_memory)(struct agp_memory *mem);
 };
 
 struct agp_bridge_data {
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b6b1568314c8..b1b4362bc648 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -309,7 +309,8 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 
 	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type ||
+	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
 		return -EINVAL;
 
 	if ((pg_start + mem->page_count) > num_entries)
@@ -348,7 +349,8 @@ static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type ||
+	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
 		return -EINVAL;
 
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
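The amd-k7 change above leans on the bridge's agp_type_to_mask_type hook returning nonzero for any extended memory type. For reference, the generic helper this driver uses lives in drivers/char/agp/generic.c (quoted from memory, so treat it as an assumption about the surrounding tree rather than part of this patch):

    int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
                                      int type)
    {
            /* Extended user types map to mask type 0 on generic GARTs. */
            if (type >= AGP_USER_TYPES)
                    return 0;
            return type;
    }

So a nonzero return means the caller asked for a memory type this GART cannot encode in its PTEs, and amd_insert_memory()/amd_remove_memory() now reject such requests with -EINVAL instead of only testing for type != 0.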
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index ee4f855611b6..f27d0d0816d3 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -151,17 +151,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 		}
 		bridge->scratch_page_page = page;
-		if (bridge->driver->agp_map_page) {
-			if (bridge->driver->agp_map_page(page,
-							 &bridge->scratch_page_dma)) {
-				dev_err(&bridge->dev->dev,
-					"unable to dma-map scratch page\n");
-				rc = -ENOMEM;
-				goto err_out_nounmap;
-			}
-		} else {
-			bridge->scratch_page_dma = page_to_phys(page);
-		}
+		bridge->scratch_page_dma = page_to_phys(page);
 
 		bridge->scratch_page = bridge->driver->mask_memory(bridge,
 						   bridge->scratch_page_dma, 0);
@@ -204,12 +194,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 	return 0;
 
 err_out:
-	if (bridge->driver->needs_scratch_page &&
-	    bridge->driver->agp_unmap_page) {
-		bridge->driver->agp_unmap_page(bridge->scratch_page_page,
-					       bridge->scratch_page_dma);
-	}
-err_out_nounmap:
 	if (bridge->driver->needs_scratch_page) {
 		void *va = page_address(bridge->scratch_page_page);
 
@@ -240,10 +224,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 	    bridge->driver->needs_scratch_page) {
 		void *va = page_address(bridge->scratch_page_page);
 
-		if (bridge->driver->agp_unmap_page)
-			bridge->driver->agp_unmap_page(bridge->scratch_page_page,
-						       bridge->scratch_page_dma);
-
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
 	}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 64255cef8a7d..4956f1c8f9d5 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -437,11 +437,6 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 		curr->is_flushed = true;
 	}
 
-	if (curr->bridge->driver->agp_map_memory) {
-		ret_val = curr->bridge->driver->agp_map_memory(curr);
-		if (ret_val)
-			return ret_val;
-	}
 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
 	if (ret_val != 0)
@@ -483,9 +478,6 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (ret_val != 0)
 		return ret_val;
 
-	if (curr->bridge->driver->agp_unmap_memory)
-		curr->bridge->driver->agp_unmap_memory(curr);
-
 	curr->is_bound = false;
 	curr->pg_start = 0;
 	spin_lock(&curr->bridge->mapped_lock);
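With the agp_map_page/agp_map_memory hooks gone from the generic backend, DMA mapping becomes a private concern of intel-gtt.c, which calls the PCI DMA API directly. A minimal sketch of the per-page pattern involved (a standalone illustration, not code from this patch; it mirrors the removed intel_agp_map_page() hook almost verbatim):

    #include <linux/pci.h>

    /* Map one page for device access; 0 on success, -EINVAL on failure. */
    static int example_map_page(struct pci_dev *pcidev, struct page *page,
                                dma_addr_t *dma)
    {
            *dma = pci_map_page(pcidev, page, 0, PAGE_SIZE,
                                PCI_DMA_BIDIRECTIONAL);
            if (pci_dma_mapping_error(pcidev, *dma))
                    return -EINVAL;
            return 0;
    }

The scratch-page setup later in this patch inlines exactly these calls, which is why the backend can now fall back to a plain page_to_phys() unconditionally.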
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index cd18493c9527..e72f49d52202 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,9 +12,6 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
-#include <linux/intel-gtt.h>
-
-#include "intel-gtt.c"
 
 int intel_agp_enabled;
 EXPORT_SYMBOL(intel_agp_enabled);
@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
 	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
-static int find_gmch(u16 device)
-{
-	struct pci_dev *gmch_device;
-
-	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
-	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
-		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
-					     device, gmch_device);
-	}
-
-	if (!gmch_device)
-		return 0;
-
-	intel_private.pcidev = gmch_device;
-	return 1;
-}
-
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
  * which one should be used if a gmch_chip_id is present.
  */
-static const struct intel_driver_description {
+static const struct intel_agp_driver_description {
 	unsigned int chip_id;
-	unsigned int gmch_chip_id;
 	char *name;
 	const struct agp_bridge_driver *driver;
-	const struct agp_bridge_driver *gmch_driver;
 } intel_agp_chipsets[] = {
-	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
-		&intel_815_driver, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-		&intel_830mp_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
-	    "GM45", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
-	    "Eaglelake", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
-	    "Q45/Q43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
-	    "G45/G43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
-	    "B43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
-	    "B43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
-	    "G41", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ 0, 0, NULL, NULL, NULL }
+	{ PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+	{ PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+	{ PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+	{ PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+	{ 0, NULL, NULL }
 };
 
-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
-				      struct agp_bridge_data *bridge)
-{
-	int i, mask;
-
-	bridge->driver = NULL;
-
-	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
-		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
-		    find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
-			bridge->driver =
-				intel_agp_chipsets[i].gmch_driver;
-			break;
-		}
-	}
-
-	if (!bridge->driver)
-		return 0;
-
-	bridge->dev_private_data = &intel_private;
-	bridge->dev = pdev;
-
-	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
-
-	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
-		mask = 40;
-	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
-		mask = 36;
-	else
-		mask = 32;
-
-	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
-		dev_err(&intel_private.pcidev->dev,
-			"set gfx device dma mask %d-bit failed!\n", mask);
-	else
-		pci_set_consistent_dma_mask(intel_private.pcidev,
-					    DMA_BIT_MASK(mask));
-
-	return 1;
-}
-
 static int __devinit agp_intel_probe(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		}
 	}
 
-	if (intel_agp_chipsets[i].name == NULL) {
+	if (!bridge->driver) {
 		if (cap_ptr)
 			dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
 				 pdev->vendor, pdev->device);
@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		return -ENODEV;
 	}
 
-	if (!bridge->driver) {
-		if (cap_ptr)
-			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
-				 intel_agp_chipsets[i].gmch_chip_id);
-		agp_put_bridge(bridge);
-		return -ENODEV;
-	}
-
 	bridge->dev = pdev;
 	bridge->dev_private_data = NULL;
 
@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
 
 	agp_remove_bridge(bridge);
 
-	if (intel_private.pcidev)
-		pci_dev_put(intel_private.pcidev);
+	intel_gmch_remove(pdev);
 
 	agp_put_bridge(bridge);
 }
@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
+	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
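With the GMCH entries gone, intel_agp_chipsets[] lists only real AGP bridges, and the `if (!bridge->driver)` check above folds two error paths into one. The lookup loop it guards is unchanged context and not shown in the hunk; reconstructed as an assumption, simplified, it looks roughly like:

    for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
            /* Match the host bridge id against the table. */
            if (pdev->device == intel_agp_chipsets[i].chip_id) {
                    bridge->driver = intel_agp_chipsets[i].driver;
                    break;
            }
    }
    /* Integrated-graphics chipsets are no longer in this table;
     * intel-gtt.c's intel_gmch_probe() claims them instead. */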
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index d09b1ab7e8ab..90539df02504 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -215,44 +215,7 @@
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB	0x0108	/* Server */
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG	0x010A
 
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
-		IS_SNB)
-
+int intel_gmch_probe(struct pci_dev *pdev,
+		     struct agp_bridge_data *bridge);
+void intel_gmch_remove(struct pci_dev *pdev);
 #endif
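intel-agp.h now exports exactly two entry points to the GMCH side, replacing the pile of IS_* device-id macros with generation flags kept inside intel-gtt.c. The intended call site in agp_intel_probe() is not part of this excerpt; sketched as an assumption that matches the declarations:

    /* Try the integrated-graphics (GTT) driver first; if it claims the
     * device, skip the AGP bridge table entirely. */
    if (intel_gmch_probe(pdev, bridge))
            goto found_gmch;

intel_gmch_remove(pdev), already visible in agp_intel_remove() above, is the matching teardown.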
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 75e0a3497888..6b6760ea2435 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -15,6 +15,18 @@
  * /fairy-tale-mode off
  */
 
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <linux/intel-gtt.h>
+#include <drm/intel-gtt.h>
+
 /*
  * If we have Intel graphics, we're not going to have anything other than
  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -23,11 +35,12 @@
  */
 #ifdef CONFIG_DMAR
 #define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
 #endif
 
 /* Max amount of stolen space, anything above will be returned to Linux */
 int intel_max_stolen = 32 * 1024 * 1024;
-EXPORT_SYMBOL(intel_max_stolen);
 
 static const struct aper_size_info_fixed intel_i810_sizes[] =
 {
@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC		3
 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT	4
 
-static struct gatt_mask intel_gen6_masks[] =
-{
-	{.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
-	 .type = INTEL_AGP_UNCACHED_MEMORY },
-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC,
-	 .type = INTEL_AGP_CACHED_MEMORY_LLC },
-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
-	 .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
-	 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
-	 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+struct intel_gtt_driver {
+	unsigned int gen : 8;
+	unsigned int is_g33 : 1;
+	unsigned int is_pineview : 1;
+	unsigned int is_ironlake : 1;
+	unsigned int dma_mask_size : 8;
+	/* Chipset specific GTT setup */
+	int (*setup)(void);
+	/* This should undo anything done in ->setup() save the unmapping
+	 * of the mmio register file, that's done in the generic code. */
+	void (*cleanup)(void);
+	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+	/* Flags is a more or less chipset specific opaque value.
+	 * For chipsets that need to support old ums (non-gem) code, this
+	 * needs to be identical to the various supported agp memory types! */
+	bool (*check_flags)(unsigned int flags);
+	void (*chipset_flush)(void);
 };
 
 static struct _intel_private {
+	struct intel_gtt base;
+	const struct intel_gtt_driver *driver;
 	struct pci_dev *pcidev;	/* device one */
+	struct pci_dev *bridge_dev;
 	u8 __iomem *registers;
+	phys_addr_t gtt_bus_addr;
+	phys_addr_t gma_bus_addr;
+	phys_addr_t pte_bus_addr;
 	u32 __iomem *gtt;		/* I915G */
 	int num_dcache_entries;
-	/* gtt_entries is the number of gtt entries that are already mapped
-	 * to stolen memory. Stolen memory is larger than the memory mapped
-	 * through gtt_entries, as it includes some reserved space for the BIOS
-	 * popup and for the GTT.
-	 */
-	int gtt_entries;			/* i830+ */
-	int gtt_total_size;
 	union {
 		void __iomem *i9xx_flush_page;
 		void *i8xx_flush_page;
@@ -88,23 +105,14 @@ static struct _intel_private {
 	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
+	struct page *scratch_page;
+	dma_addr_t scratch_page_dma;
 } intel_private;
 
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
-	*ret = pci_map_page(intel_private.pcidev, page, 0,
-			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
-		return -EINVAL;
-	return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
-	pci_unmap_page(intel_private.pcidev, dma,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
+#define INTEL_GTT_GEN	intel_private.driver->gen
+#define IS_G33		intel_private.driver->is_g33
+#define IS_PINEVIEW	intel_private.driver->is_pineview
+#define IS_IRONLAKE	intel_private.driver->is_ironlake
 
 static void intel_agp_free_sglist(struct agp_memory *mem)
 {
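intel_private gains a `base` member of type struct intel_gtt, the structure a drm driver will eventually consume directly. The header itself is not in this excerpt; reconstructed from the fields this patch touches (an assumption):

    /* <drm/intel-gtt.h>, assumed shape; only fields used here shown. */
    struct intel_gtt {
            /* GTT entries covering BIOS-stolen memory, at the start. */
            unsigned int gtt_stolen_entries;
            /* Total number of GTT entries. */
            unsigned int gtt_total_entries;
            /* Entries reachable through the CPU-visible aperture. */
            unsigned int gtt_mappable_entries;
    };

The INTEL_GTT_GEN/IS_G33/IS_PINEVIEW/IS_IRONLAKE macros then replace the device-id IS_* tests: the generation is a property of the bound intel_gtt_driver rather than something re-derived from PCI ids at every call site.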
@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
 	struct scatterlist *sg;
 	int i;
 
+	if (mem->sg_list)
+		return 0; /* already mapped (for e.g. resume */
+
 	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
 
 	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
 	intel_agp_free_sglist(mem);
 }
 
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-					off_t pg_start, int mask_type)
-{
-	struct scatterlist *sg;
-	int i, j;
-
-	j = pg_start;
-
-	WARN_ON(!mem->num_sg);
-
-	if (mem->num_sg == mem->page_count) {
-		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
-			writel(agp_bridge->driver->mask_memory(agp_bridge,
-					sg_dma_address(sg), mask_type),
-					intel_private.gtt+j);
-			j++;
-		}
-	} else {
-		/* sg may merge pages, but we have to separate
-		 * per-page addr for GTT */
-		unsigned int len, m;
-
-		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
-			len = sg_dma_len(sg) / PAGE_SIZE;
-			for (m = 0; m < len; m++) {
-				writel(agp_bridge->driver->mask_memory(agp_bridge,
-					sg_dma_address(sg) + m * PAGE_SIZE,
-					mask_type),
-					intel_private.gtt+j);
-				j++;
-			}
-		}
-	}
-	readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-					off_t pg_start, int mask_type)
-{
-	int i, j;
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				page_to_phys(mem->pages[i]), mask_type),
-		       intel_private.gtt+j);
-	}
-
-	readl(intel_private.gtt+j-1);
-}
-
-#endif
-
 static int intel_i810_fetch_size(void)
 {
 	u32 smram_miscc;
 	struct aper_size_info_fixed *values;
 
-	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+	pci_read_config_dword(intel_private.bridge_dev,
+			      I810_SMRAM_MISCC, &smram_miscc);
 	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
 
 	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
 		return 0;
 	}
 	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
 	iounmap(intel_private.registers);
 }
 
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
 	return;
 }
@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
 	atomic_dec(&agp_bridge->current_memory_agp);
 }
 
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
-					int type)
-{
-	if (type < AGP_USER_TYPES)
-		return type;
-	else if (type == AGP_USER_CACHED_MEMORY)
-		return INTEL_AGP_CACHED_MEMORY;
-	else
-		return 0;
-}
-
-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
-					int type)
-{
-	unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
-	unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
-
-	if (type_mask == AGP_USER_UNCACHED_MEMORY)
-		return INTEL_AGP_UNCACHED_MEMORY;
-	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
-		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
-			      INTEL_AGP_CACHED_MEMORY_LLC_MLC;
-	else /* set 'normal'/'cached' to LLC by default */
-		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
-			      INTEL_AGP_CACHED_MEMORY_LLC;
-}
-
-
 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 				     int type)
 {
@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
 	return addr | bridge->driver->masks[type].mask;
 }
 
-static struct aper_size_info_fixed intel_i830_sizes[] =
+static int intel_gtt_setup_scratch_page(void)
 {
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	get_page(page);
+	set_pages_uc(page, 1);
+
+	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+			return -EINVAL;
+
+		intel_private.scratch_page_dma = dma_addr;
+	} else
+		intel_private.scratch_page_dma = page_to_phys(page);
+
+	intel_private.scratch_page = page;
+
+	return 0;
+}
+
+static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
 	{128, 32768, 5},
 	/* The 64M mode still requires a 128k gatt */
 	{64, 16384, 5},
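intel_gtt_setup_scratch_page() replaces the old bridge-level scratch page handling: every unused GTT entry must point at a safe page so a stray access never hits an invalid address. Condensed into one illustrative helper how the rest of the patch uses it (an assumption for clarity; the real call sites are intel_fake_agp_configure() and intel_fake_agp_remove_entries() below):

    /* Point GTT entries [first, first + num) at the scratch page. */
    static void example_clear_range(unsigned int first, unsigned int num)
    {
            unsigned int i;

            for (i = first; i < first + num; i++)
                    intel_private.driver->write_entry(
                            intel_private.scratch_page_dma, i, 0);
            readl(intel_private.gtt + i - 1);       /* flush PCI posting */
    }

Note the gen > 2 chips map the scratch page through the DMA API while older ones use the raw CPU physical address; intel_fake_agp_insert_entries() applies the same split to user pages.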
@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
 	{512, 131072, 7},
 };
 
-static void intel_i830_init_gtt_entries(void)
+static unsigned int intel_gtt_stolen_entries(void)
 {
 	u16 gmch_ctrl;
-	int gtt_entries = 0;
 	u8 rdct;
 	int local = 0;
 	static const int ddt[4] = { 0, 16, 32, 64 };
-	int size; /* reserved space (in kb) at the top of stolen memory */
+	unsigned int overhead_entries, stolen_entries;
+	unsigned int stolen_size = 0;
 
-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+	pci_read_config_word(intel_private.bridge_dev,
+			     I830_GMCH_CTRL, &gmch_ctrl);
 
-	if (IS_I965) {
-		u32 pgetbl_ctl;
-		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
+		overhead_entries = 0;
+	else
+		overhead_entries = intel_private.base.gtt_mappable_entries
+			/ 1024;
 
-		/* The 965 has a field telling us the size of the GTT,
-		 * which may be larger than what is necessary to map the
-		 * aperture.
-		 */
-		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-		case I965_PGETBL_SIZE_128KB:
-			size = 128;
-			break;
-		case I965_PGETBL_SIZE_256KB:
-			size = 256;
-			break;
-		case I965_PGETBL_SIZE_512KB:
-			size = 512;
-			break;
-		case I965_PGETBL_SIZE_1MB:
-			size = 1024;
-			break;
-		case I965_PGETBL_SIZE_2MB:
-			size = 2048;
-			break;
-		case I965_PGETBL_SIZE_1_5MB:
-			size = 1024 + 512;
-			break;
-		default:
-			dev_info(&intel_private.pcidev->dev,
-				 "unknown page table size, assuming 512KB\n");
-			size = 512;
-		}
-		size += 4; /* add in BIOS popup space */
-	} else if (IS_G33 && !IS_PINEVIEW) {
-		/* G33's GTT size defined in gmch_ctrl */
-		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
-		case G33_PGETBL_SIZE_1M:
-			size = 1024;
-			break;
-		case G33_PGETBL_SIZE_2M:
-			size = 2048;
-			break;
-		default:
-			dev_info(&agp_bridge->dev->dev,
-				 "unknown page table size 0x%x, assuming 512KB\n",
-				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
-			size = 512;
-		}
-		size += 4;
-	} else if (IS_G4X || IS_PINEVIEW) {
-		/* On 4 series hardware, GTT stolen is separate from graphics
-		 * stolen, ignore it in stolen gtt entries counting.  However,
-		 * 4KB of the stolen memory doesn't get mapped to the GTT.
-		 */
-		size = 4;
-	} else {
-		/* On previous hardware, the GTT size was just what was
-		 * required to map the aperture.
-		 */
-		size = agp_bridge->driver->fetch_size() + 4;
-	}
+	overhead_entries += 1; /* BIOS popup */
 
-	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
-	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
 		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
 		case I830_GMCH_GMS_STOLEN_512:
-			gtt_entries = KB(512) - KB(size);
+			stolen_size = KB(512);
 			break;
 		case I830_GMCH_GMS_STOLEN_1024:
-			gtt_entries = MB(1) - KB(size);
+			stolen_size = MB(1);
 			break;
 		case I830_GMCH_GMS_STOLEN_8192:
-			gtt_entries = MB(8) - KB(size);
+			stolen_size = MB(8);
 			break;
 		case I830_GMCH_GMS_LOCAL:
 			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
-			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
 					MB(ddt[I830_RDRAM_DDT(rdct)]);
 			local = 1;
 			break;
 		default:
-			gtt_entries = 0;
+			stolen_size = 0;
 			break;
 		}
-	} else if (IS_SNB) {
+	} else if (INTEL_GTT_GEN == 6) {
 		/*
 		 * SandyBridge has new memory control reg at 0x50.w
 		 */
 		u16 snb_gmch_ctl;
 		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
 		case SNB_GMCH_GMS_STOLEN_32M:
-			gtt_entries = MB(32) - KB(size);
+			stolen_size = MB(32);
 			break;
 		case SNB_GMCH_GMS_STOLEN_64M:
-			gtt_entries = MB(64) - KB(size);
+			stolen_size = MB(64);
 			break;
 		case SNB_GMCH_GMS_STOLEN_96M:
-			gtt_entries = MB(96) - KB(size);
+			stolen_size = MB(96);
 			break;
 		case SNB_GMCH_GMS_STOLEN_128M:
-			gtt_entries = MB(128) - KB(size);
+			stolen_size = MB(128);
 			break;
 		case SNB_GMCH_GMS_STOLEN_160M:
-			gtt_entries = MB(160) - KB(size);
+			stolen_size = MB(160);
 			break;
 		case SNB_GMCH_GMS_STOLEN_192M:
-			gtt_entries = MB(192) - KB(size);
+			stolen_size = MB(192);
 			break;
 		case SNB_GMCH_GMS_STOLEN_224M:
-			gtt_entries = MB(224) - KB(size);
+			stolen_size = MB(224);
 			break;
 		case SNB_GMCH_GMS_STOLEN_256M:
-			gtt_entries = MB(256) - KB(size);
+			stolen_size = MB(256);
 			break;
 		case SNB_GMCH_GMS_STOLEN_288M:
-			gtt_entries = MB(288) - KB(size);
+			stolen_size = MB(288);
 			break;
 		case SNB_GMCH_GMS_STOLEN_320M:
-			gtt_entries = MB(320) - KB(size);
+			stolen_size = MB(320);
 			break;
 		case SNB_GMCH_GMS_STOLEN_352M:
-			gtt_entries = MB(352) - KB(size);
+			stolen_size = MB(352);
 			break;
 		case SNB_GMCH_GMS_STOLEN_384M:
-			gtt_entries = MB(384) - KB(size);
+			stolen_size = MB(384);
 			break;
 		case SNB_GMCH_GMS_STOLEN_416M:
-			gtt_entries = MB(416) - KB(size);
+			stolen_size = MB(416);
 			break;
 		case SNB_GMCH_GMS_STOLEN_448M:
-			gtt_entries = MB(448) - KB(size);
+			stolen_size = MB(448);
 			break;
 		case SNB_GMCH_GMS_STOLEN_480M:
-			gtt_entries = MB(480) - KB(size);
+			stolen_size = MB(480);
 			break;
 		case SNB_GMCH_GMS_STOLEN_512M:
-			gtt_entries = MB(512) - KB(size);
+			stolen_size = MB(512);
 			break;
 		}
 	} else {
 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
 		case I855_GMCH_GMS_STOLEN_1M:
-			gtt_entries = MB(1) - KB(size);
+			stolen_size = MB(1);
 			break;
 		case I855_GMCH_GMS_STOLEN_4M:
-			gtt_entries = MB(4) - KB(size);
+			stolen_size = MB(4);
 			break;
 		case I855_GMCH_GMS_STOLEN_8M:
-			gtt_entries = MB(8) - KB(size);
+			stolen_size = MB(8);
 			break;
 		case I855_GMCH_GMS_STOLEN_16M:
-			gtt_entries = MB(16) - KB(size);
+			stolen_size = MB(16);
 			break;
 		case I855_GMCH_GMS_STOLEN_32M:
-			gtt_entries = MB(32) - KB(size);
+			stolen_size = MB(32);
 			break;
 		case I915_GMCH_GMS_STOLEN_48M:
-			/* Check it's really I915G */
-			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-				gtt_entries = MB(48) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(48);
 			break;
 		case I915_GMCH_GMS_STOLEN_64M:
-			/* Check it's really I915G */
-			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-				gtt_entries = MB(64) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(64);
 			break;
 		case G33_GMCH_GMS_STOLEN_128M:
-			if (IS_G33 || IS_I965 || IS_G4X)
-				gtt_entries = MB(128) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(128);
 			break;
 		case G33_GMCH_GMS_STOLEN_256M:
-			if (IS_G33 || IS_I965 || IS_G4X)
-				gtt_entries = MB(256) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(256);
 			break;
 		case INTEL_GMCH_GMS_STOLEN_96M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(96) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(96);
 			break;
 		case INTEL_GMCH_GMS_STOLEN_160M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(160) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(160);
 			break;
 		case INTEL_GMCH_GMS_STOLEN_224M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(224) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(224);
 			break;
 		case INTEL_GMCH_GMS_STOLEN_352M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(352) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(352);
 			break;
 		default:
-			gtt_entries = 0;
+			stolen_size = 0;
 			break;
 		}
 	}
-	if (!local && gtt_entries > intel_max_stolen) {
-		dev_info(&agp_bridge->dev->dev,
+
+	if (!local && stolen_size > intel_max_stolen) {
+		dev_info(&intel_private.bridge_dev->dev,
 			 "detected %dK stolen memory, trimming to %dK\n",
-			 gtt_entries / KB(1), intel_max_stolen / KB(1));
-		gtt_entries = intel_max_stolen / KB(4);
-	} else if (gtt_entries > 0) {
-		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
-			 gtt_entries / KB(1), local ? "local" : "stolen");
-		gtt_entries /= KB(4);
+			 stolen_size / KB(1), intel_max_stolen / KB(1));
+		stolen_size = intel_max_stolen;
+	} else if (stolen_size > 0) {
+		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
+			 stolen_size / KB(1), local ? "local" : "stolen");
 	} else {
-		dev_info(&agp_bridge->dev->dev,
+		dev_info(&intel_private.bridge_dev->dev,
 			 "no pre-allocated video memory detected\n");
-		gtt_entries = 0;
+		stolen_size = 0;
 	}
 
-	intel_private.gtt_entries = gtt_entries;
+	stolen_entries = stolen_size/KB(4) - overhead_entries;
+
+	return stolen_entries;
 }
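The accounting is easier to see with numbers. Take an assumed i915-class part (gen 3) with a 128 MB aperture and 8 MB of BIOS-stolen memory: gtt_mappable_entries = 128 MB / 4 KB = 32768, so overhead_entries = 32768/1024 + 1 = 33 (GTT shadow plus the one BIOS popup entry); stolen_size/KB(4) = 8192 KB / 4 KB = 2048 entries; therefore gtt_stolen_entries = 2048 - 33 = 2015 entries stay reserved at the bottom of the GTT. The old code baked the same subtraction into every switch case as "- KB(size)"; hoisting it out is what lets the cases shrink to plain sizes.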
"local" : "stolen"); } else { - dev_info(&agp_bridge->dev->dev, + dev_info(&intel_private.bridge_dev->dev, "no pre-allocated video memory detected\n"); - gtt_entries = 0; + stolen_size = 0; } - intel_private.gtt_entries = gtt_entries; + stolen_entries = stolen_size/KB(4) - overhead_entries; + + return stolen_entries; } -static void intel_i830_fini_flush(void) +static unsigned int intel_gtt_total_entries(void) +{ + int size; + + if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) { + u32 pgetbl_ctl; + pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); + + switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { + case I965_PGETBL_SIZE_128KB: + size = KB(128); + break; + case I965_PGETBL_SIZE_256KB: + size = KB(256); + break; + case I965_PGETBL_SIZE_512KB: + size = KB(512); + break; + case I965_PGETBL_SIZE_1MB: + size = KB(1024); + break; + case I965_PGETBL_SIZE_2MB: + size = KB(2048); + break; + case I965_PGETBL_SIZE_1_5MB: + size = KB(1024 + 512); + break; + default: + dev_info(&intel_private.pcidev->dev, + "unknown page table size, assuming 512KB\n"); + size = KB(512); + } + + return size/4; + } else if (INTEL_GTT_GEN == 6) { + u16 snb_gmch_ctl; + + pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); + switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { + default: + case SNB_GTT_SIZE_0M: + printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); + size = MB(0); + break; + case SNB_GTT_SIZE_1M: + size = MB(1); + break; + case SNB_GTT_SIZE_2M: + size = MB(2); + break; + } + return size/4; + } else { + /* On previous hardware, the GTT size was just what was + * required to map the aperture. + */ + return intel_private.base.gtt_mappable_entries; + } +} + +static unsigned int intel_gtt_mappable_entries(void) +{ + unsigned int aperture_size; + + if (INTEL_GTT_GEN == 2) { + u16 gmch_ctrl; + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + + if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M) + aperture_size = MB(64); + else + aperture_size = MB(128); + } else { + /* 9xx supports large sizes, just look at the length */ + aperture_size = pci_resource_len(intel_private.pcidev, 2); + } + + return aperture_size >> PAGE_SHIFT; +} + +static void intel_gtt_teardown_scratch_page(void) +{ + set_pages_wb(intel_private.scratch_page, 1); + pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + put_page(intel_private.scratch_page); + __free_page(intel_private.scratch_page); +} + +static void intel_gtt_cleanup(void) +{ + intel_private.driver->cleanup(); + + iounmap(intel_private.gtt); + iounmap(intel_private.registers); + + intel_gtt_teardown_scratch_page(); +} + +static int intel_gtt_init(void) +{ + u32 gtt_map_size; + int ret; + + ret = intel_private.driver->setup(); + if (ret != 0) + return ret; + + intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); + intel_private.base.gtt_total_entries = intel_gtt_total_entries(); + + dev_info(&intel_private.bridge_dev->dev, + "detected gtt size: %dK total, %dK mappable\n", + intel_private.base.gtt_total_entries * 4, + intel_private.base.gtt_mappable_entries * 4); + + gtt_map_size = intel_private.base.gtt_total_entries * 4; + + intel_private.gtt = ioremap(intel_private.gtt_bus_addr, + gtt_map_size); + if (!intel_private.gtt) { + intel_private.driver->cleanup(); + iounmap(intel_private.registers); + return -ENOMEM; + } + + global_cache_flush(); /* FIXME: ? 
 
+static void i830_cleanup(void)
 {
 	kunmap(intel_private.i8xx_page);
 	intel_private.i8xx_flush_page = NULL;
-	unmap_page_from_agp(intel_private.i8xx_page);
 
 	__free_page(intel_private.i8xx_page);
 	intel_private.i8xx_page = NULL;
@@ -780,13 +825,13 @@ static void intel_i830_setup_flush(void)
 	if (intel_private.i8xx_page)
 		return;
 
-	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+	intel_private.i8xx_page = alloc_page(GFP_KERNEL);
 	if (!intel_private.i8xx_page)
 		return;
 
 	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
 	if (!intel_private.i8xx_flush_page)
-		intel_i830_fini_flush();
+		i830_cleanup();
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -799,7 +844,7 @@ static void intel_i830_setup_flush(void)
  * that buffer out, we just fill 1KB and clflush it out, on the assumption
  * that it'll push whatever was in there out. It appears to work.
  */
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+static void i830_chipset_flush(void)
 {
 	unsigned int *pg = intel_private.i8xx_flush_page;
@@ -811,169 +856,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
 	printk(KERN_ERR "Timed out waiting for cache flush.\n");
 }
 
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+			     unsigned int flags)
 {
-	int page_order;
-	struct aper_size_info_fixed *size;
-	int num_entries;
-	u32 temp;
+	u32 pte_flags = I810_PTE_VALID;
+
+	switch (flags) {
+	case AGP_DCACHE_MEMORY:
+		pte_flags |= I810_PTE_LOCAL;
+		break;
+	case AGP_USER_CACHED_MEMORY:
+		pte_flags |= I830_PTE_SYSTEM_CACHED;
+		break;
+	}
 
-	size = agp_bridge->current_size;
-	page_order = size->page_order;
-	num_entries = size->num_entries;
-	agp_bridge->gatt_table_real = NULL;
+	writel(addr | pte_flags, intel_private.gtt + entry);
+}
 
-	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-	temp &= 0xfff80000;
+static void intel_enable_gtt(void)
+{
+	u32 gma_addr;
+	u16 gmch_ctrl;
 
-	intel_private.registers = ioremap(temp, 128 * 4096);
+	if (INTEL_GTT_GEN == 2)
+		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+				      &gma_addr);
+	else
+		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+				      &gma_addr);
+
+	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+
+	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
+	gmch_ctrl |= I830_GMCH_ENABLED;
+	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+
+	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
+	       intel_private.registers+I810_PGETBL_CTL);
+	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
+}
 
+static int i830_setup(void)
+{
+	u32 reg_addr;
+
+	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+	reg_addr &= 0xfff80000;
+
+	intel_private.registers = ioremap(reg_addr, KB(64));
 	if (!intel_private.registers)
 		return -ENOMEM;
 
-	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-	global_cache_flush();	/* FIXME: ?? */
+	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+	intel_private.pte_bus_addr =
+		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
 
-	/* we have to call this as early as possible after the MMIO base address is known */
-	intel_i830_init_gtt_entries();
-	if (intel_private.gtt_entries == 0) {
-		iounmap(intel_private.registers);
-		return -ENOMEM;
-	}
+	intel_i830_setup_flush();
 
+	return 0;
+}
+
+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	agp_bridge->gatt_table_real = NULL;
 	agp_bridge->gatt_table = NULL;
-
-	agp_bridge->gatt_bus_addr = temp;
+	agp_bridge->gatt_bus_addr = 0;
 
 	return 0;
 }
 
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
 {
 	return 0;
 }
 
-static int intel_i830_fetch_size(void)
+static int intel_fake_agp_configure(void)
 {
-	u16 gmch_ctrl;
-	struct aper_size_info_fixed *values;
-
-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
-	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
-		/* 855GM/852GM/865G has 128MB aperture size */
-		agp_bridge->current_size = (void *) values;
-		agp_bridge->aperture_size_idx = 0;
-		return values[0].size;
-	}
-
-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
-	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
-		agp_bridge->current_size = (void *) values;
-		agp_bridge->aperture_size_idx = 0;
-		return values[0].size;
-	} else {
-		agp_bridge->current_size = (void *) (values + 1);
-		agp_bridge->aperture_size_idx = 1;
-		return values[1].size;
-	}
-
-	return 0;
-}
-
-static int intel_i830_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	u16 gmch_ctrl;
 	int i;
 
-	current_size = A_SIZE_FIX(agp_bridge->current_size);
+	intel_enable_gtt();
 
-	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-	gmch_ctrl |= I830_GMCH_ENABLED;
-	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
-
-	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-
-	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
-			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
+	for (i = intel_private.base.gtt_stolen_entries;
+			i < intel_private.base.gtt_total_entries; i++) {
+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
+						  i, 0);
 	}
+	readl(intel_private.gtt+i-1);	/* PCI Posting. */
 
 	global_cache_flush();
 
-	intel_i830_setup_flush();
 	return 0;
 }
 
-static void intel_i830_cleanup(void)
+static bool i830_check_flags(unsigned int flags)
 {
-	iounmap(intel_private.registers);
+	switch (flags) {
+	case 0:
+	case AGP_PHYS_MEMORY:
+	case AGP_USER_CACHED_MEMORY:
+	case AGP_USER_MEMORY:
+		return true;
+	}
+
+	return false;
 }
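At this point every gen2 callback exists, which is what makes the vtable refactor click: a chipset plugs into intel_gtt_driver with one static instance. The instances themselves land past the end of this excerpt, so the following is a sketch of the expected shape, using only functions defined above:

    static const struct intel_gtt_driver i830_gtt_driver = {
            .gen = 2,
            .setup = i830_setup,
            .cleanup = i830_cleanup,
            .write_entry = i830_write_entry,
            .check_flags = i830_check_flags,
            .chipset_flush = i830_chipset_flush,
    };

intel_gmch_probe() would then point intel_private.driver at the entry matching the probed device, and everything downstream dispatches through it instead of testing device ids.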
*/ - } - - global_cache_flush(); - - intel_i9xx_setup_flush(); - - return 0; -} - -static void intel_i915_cleanup(void) +static void i9xx_cleanup(void) { if (intel_private.i9xx_flush_page) iounmap(intel_private.i9xx_flush_page); @@ -1146,320 +1182,93 @@ static void intel_i915_cleanup(void) release_resource(&intel_private.ifp_resource); intel_private.ifp_resource.start = 0; intel_private.resource_valid = 0; - iounmap(intel_private.gtt); - iounmap(intel_private.registers); } -static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) +static void i9xx_chipset_flush(void) { if (intel_private.i9xx_flush_page) writel(1, intel_private.i9xx_flush_page); } -static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, - int type) -{ - int num_entries; - void *temp; - int ret = -EINVAL; - int mask_type; - - if (mem->page_count == 0) - goto out; - - temp = agp_bridge->current_size; - num_entries = A_SIZE_FIX(temp)->num_entries; - - if (pg_start < intel_private.gtt_entries) { - dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, - "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", - pg_start, intel_private.gtt_entries); - - dev_info(&intel_private.pcidev->dev, - "trying to insert into local/stolen memory\n"); - goto out_err; - } - - if ((pg_start + mem->page_count) > num_entries) - goto out_err; - - /* The i915 can't check the GTT for entries since it's read only; - * depend on the caller to make the correct offset decisions. - */ - - if (type != mem->type) - goto out_err; - - mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); - - if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY && - mask_type != INTEL_AGP_CACHED_MEMORY) - goto out_err; - - if (!mem->is_flushed) - global_cache_flush(); - - intel_agp_insert_sg_entries(mem, pg_start, mask_type); - - out: - ret = 0; - out_err: - mem->is_flushed = true; - return ret; -} - -static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, - int type) -{ - int i; - - if (mem->page_count == 0) - return 0; - - if (pg_start < intel_private.gtt_entries) { - dev_info(&intel_private.pcidev->dev, - "trying to disable local/stolen memory\n"); - return -EINVAL; - } - - for (i = pg_start; i < (mem->page_count + pg_start); i++) - writel(agp_bridge->scratch_page, intel_private.gtt+i); - - readl(intel_private.gtt+i-1); - - return 0; -} - -/* Return the aperture size by just checking the resource length. The effect - * described in the spec of the MSAC registers is just changing of the - * resource size. 
- */ -static int intel_i9xx_fetch_size(void) -{ - int num_sizes = ARRAY_SIZE(intel_i830_sizes); - int aper_size; /* size in megabytes */ - int i; - - aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1); - - for (i = 0; i < num_sizes; i++) { - if (aper_size == intel_i830_sizes[i].size) { - agp_bridge->current_size = intel_i830_sizes + i; - return aper_size; - } - } - - return 0; -} - -static int intel_i915_get_gtt_size(void) -{ - int size; - - if (IS_G33) { - u16 gmch_ctrl; - - /* G33's GTT size defined in gmch_ctrl */ - pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); - switch (gmch_ctrl & I830_GMCH_GMS_MASK) { - case I830_GMCH_GMS_STOLEN_512: - size = 512; - break; - case I830_GMCH_GMS_STOLEN_1024: - size = 1024; - break; - case I830_GMCH_GMS_STOLEN_8192: - size = 8*1024; - break; - default: - dev_info(&agp_bridge->dev->dev, - "unknown page table size 0x%x, assuming 512KB\n", - (gmch_ctrl & I830_GMCH_GMS_MASK)); - size = 512; - } - } else { - /* On previous hardware, the GTT size was just what was - * required to map the aperture. - */ - size = agp_bridge->driver->fetch_size(); - } - - return KB(size); -} - -/* The intel i915 automatically initializes the agp aperture during POST. - * Use the memory already set aside for in the GTT. - */ -static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) -{ - int page_order; - struct aper_size_info_fixed *size; - int num_entries; - u32 temp, temp2; - int gtt_map_size; - - size = agp_bridge->current_size; - page_order = size->page_order; - num_entries = size->num_entries; - agp_bridge->gatt_table_real = NULL; - - pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); - pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); - - gtt_map_size = intel_i915_get_gtt_size(); - - intel_private.gtt = ioremap(temp2, gtt_map_size); - if (!intel_private.gtt) - return -ENOMEM; - - intel_private.gtt_total_size = gtt_map_size / 4; - - temp &= 0xfff80000; - - intel_private.registers = ioremap(temp, 128 * 4096); - if (!intel_private.registers) { - iounmap(intel_private.gtt); - return -ENOMEM; - } - - temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - global_cache_flush(); /* FIXME: ? */ - - /* we have to call this as early as possible after the MMIO base address is known */ - intel_i830_init_gtt_entries(); - if (intel_private.gtt_entries == 0) { - iounmap(intel_private.gtt); - iounmap(intel_private.registers); - return -ENOMEM; - } - - agp_bridge->gatt_table = NULL; - - agp_bridge->gatt_bus_addr = temp; - - return 0; -} - -/* - * The i965 supports 36-bit physical addresses, but to keep - * the format of the GTT the same, the bits that don't fit - * in a 32-bit word are shifted down to bits 4..7. - * - * Gcc is smart enough to notice that "(addr >> 28) & 0xf0" - * is always zero on 32-bit architectures, so no need to make - * this conditional. 
- */ -static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, - dma_addr_t addr, int type) +static void i965_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) { /* Shift high bits down */ addr |= (addr >> 28) & 0xf0; - - /* Type checking must be done elsewhere */ - return addr | bridge->driver->masks[type].mask; + writel(addr | I810_PTE_VALID, intel_private.gtt + entry); } -static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, - dma_addr_t addr, int type) +static bool gen6_check_flags(unsigned int flags) { + return true; +} + +static void gen6_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_UNCACHED_MEMORY) + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } else { /* set 'normal'/'cached' to LLC by default */ + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + /* gen6 has bit11-4 for physical addr bit39-32 */ addr |= (addr >> 28) & 0xff0; - - /* Type checking must be done elsewhere */ - return addr | bridge->driver->masks[type].mask; + writel(addr | pte_flags, intel_private.gtt + entry); } -static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) +static void gen6_cleanup(void) { - u16 snb_gmch_ctl; +} - switch (agp_bridge->dev->device) { - case PCI_DEVICE_ID_INTEL_GM45_HB: - case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB: - case PCI_DEVICE_ID_INTEL_Q45_HB: - case PCI_DEVICE_ID_INTEL_G45_HB: - case PCI_DEVICE_ID_INTEL_G41_HB: - case PCI_DEVICE_ID_INTEL_B43_HB: - case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB: - case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: - case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: - case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: - *gtt_offset = *gtt_size = MB(2); - break; - case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: - case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: - case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB: - *gtt_offset = MB(2); +static int i9xx_setup(void) +{ + u32 reg_addr; - pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); - switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { + pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr); + + reg_addr &= 0xfff80000; + + intel_private.registers = ioremap(reg_addr, 128 * 4096); + if (!intel_private.registers) + return -ENOMEM; + + if (INTEL_GTT_GEN == 3) { + u32 gtt_addr; + + pci_read_config_dword(intel_private.pcidev, + I915_PTEADDR, &gtt_addr); + intel_private.gtt_bus_addr = gtt_addr; + } else { + u32 gtt_offset; + + switch (INTEL_GTT_GEN) { + case 5: + case 6: + gtt_offset = MB(2); + break; + case 4: default: - case SNB_GTT_SIZE_0M: - printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl); - *gtt_size = MB(0); - break; - case SNB_GTT_SIZE_1M: - *gtt_size = MB(1); - break; - case SNB_GTT_SIZE_2M: - *gtt_size = MB(2); + gtt_offset = KB(512); break; } - break; - default: - *gtt_offset = *gtt_size = KB(512); - } -} -
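
The folds in i965_write_entry() and gen6_write_entry() above keep each PTE a single 32-bit word: i965 carries physical-address bits 35:32 in PTE bits 7:4 ((addr >> 28) & 0xf0), and gen6 carries bits 39:32 in PTE bits 11:4 ((addr >> 28) & 0xff0); the valid and cacheability flags are then OR'd in before the writel(), as the code shows. A standalone check of the gen6 case:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x23abcde000ULL;	/* page-aligned 40-bit address */
		uint32_t pte = (uint32_t)(addr | ((addr >> 28) & 0xff0));

		/* prints 0xabcde230: bits 39:32 (0x23) now sit in PTE bits 11:4 */
		printf("pte = 0x%08x\n", pte);
		return 0;
	}
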
-/* The intel i965 automatically initializes the agp aperture during POST. - * Use the memory already set aside for in the GTT. - */ -static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) -{ - int page_order; - struct aper_size_info_fixed *size; - int num_entries; - u32 temp; - int gtt_offset, gtt_size; - - size = agp_bridge->current_size; - page_order = size->page_order; - num_entries = size->num_entries; - agp_bridge->gatt_table_real = NULL; - - pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); - - temp &= 0xfff00000; - - intel_i965_get_gtt_range(&gtt_offset, &gtt_size); - - intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); - - if (!intel_private.gtt) - return -ENOMEM; - - intel_private.gtt_total_size = gtt_size / 4; - - intel_private.registers = ioremap(temp, 128 * 4096); - if (!intel_private.registers) { - iounmap(intel_private.gtt); - return -ENOMEM; + intel_private.gtt_bus_addr = reg_addr + gtt_offset; } - temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - global_cache_flush(); /* FIXME: ? */ + intel_private.pte_bus_addr = + readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - /* we have to call this as early as possible after the MMIO base address is known */ - intel_i830_init_gtt_entries(); - if (intel_private.gtt_entries == 0) { - iounmap(intel_private.gtt); - iounmap(intel_private.registers); - return -ENOMEM; - } - - agp_bridge->gatt_table = NULL; - - agp_bridge->gatt_bus_addr = temp; + intel_i9xx_setup_flush(); return 0; } @@ -1475,7 +1284,7 @@ static const struct agp_bridge_driver intel_810_driver = { .cleanup = intel_i810_cleanup, .mask_memory = intel_i810_mask_memory, .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, + .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, @@ -1490,161 +1299,282 @@ static const struct agp_bridge_driver intel_810_driver = { .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; -static const struct agp_bridge_driver intel_830_driver = { +static const struct agp_bridge_driver intel_fake_agp_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, - .needs_scratch_page = true, - .configure = intel_i830_configure, - .fetch_size = intel_i830_fetch_size, - .cleanup = intel_i830_cleanup, - .mask_memory = intel_i810_mask_memory, - .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, + .aperture_sizes = intel_fake_agp_sizes, + .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), + .configure = intel_fake_agp_configure, + .fetch_size = intel_fake_agp_fetch_size, + .cleanup = intel_gtt_cleanup, + .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = intel_i830_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, - .insert_memory = intel_i830_insert_entries, - .remove_memory = intel_i830_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, + .create_gatt_table = intel_fake_agp_create_gatt_table, + .free_gatt_table = intel_fake_agp_free_gatt_table, + .insert_memory = intel_fake_agp_insert_entries, + .remove_memory = intel_fake_agp_remove_entries, + .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, - .chipset_flush = intel_i830_chipset_flush, + .chipset_flush = 
intel_fake_agp_chipset_flush, }; -static const struct agp_bridge_driver intel_915_driver = { - .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, - .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, - .needs_scratch_page = true, - .configure = intel_i9xx_configure, - .fetch_size = intel_i9xx_fetch_size, - .cleanup = intel_i915_cleanup, - .mask_memory = intel_i810_mask_memory, - .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_i915_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, - .insert_memory = intel_i915_insert_entries, - .remove_memory = intel_i915_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, - .chipset_flush = intel_i915_chipset_flush, -#ifdef USE_PCI_DMA_API - .agp_map_page = intel_agp_map_page, - .agp_unmap_page = intel_agp_unmap_page, - .agp_map_memory = intel_agp_map_memory, - .agp_unmap_memory = intel_agp_unmap_memory, -#endif +static const struct intel_gtt_driver i81x_gtt_driver = { + .gen = 1, + .dma_mask_size = 32, +}; +static const struct intel_gtt_driver i8xx_gtt_driver = { + .gen = 2, + .setup = i830_setup, + .cleanup = i830_cleanup, + .write_entry = i830_write_entry, + .dma_mask_size = 32, + .check_flags = i830_check_flags, + .chipset_flush = i830_chipset_flush, +}; +static const struct intel_gtt_driver i915_gtt_driver = { + .gen = 3, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + /* i945 is the last gpu to need phys mem (for overlay and cursors). 
*/ + .write_entry = i830_write_entry, + .dma_mask_size = 32, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver g33_gtt_driver = { + .gen = 3, + .is_g33 = 1, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver pineview_gtt_driver = { + .gen = 3, + .is_pineview = 1, .is_g33 = 1, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver i965_gtt_driver = { + .gen = 4, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver g4x_gtt_driver = { + .gen = 5, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver ironlake_gtt_driver = { + .gen = 5, + .is_ironlake = 1, + .setup = i9xx_setup, + .cleanup = i9xx_cleanup, + .write_entry = i965_write_entry, + .dma_mask_size = 36, + .check_flags = i830_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; +static const struct intel_gtt_driver sandybridge_gtt_driver = { + .gen = 6, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = gen6_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, }; -static const struct agp_bridge_driver intel_i965_driver = { - .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, - .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, - .needs_scratch_page = true, - .configure = intel_i9xx_configure, - .fetch_size = intel_i9xx_fetch_size, - .cleanup = intel_i915_cleanup, - .mask_memory = intel_i965_mask_memory, - .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_i965_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, - .insert_memory = intel_i915_insert_entries, - .remove_memory = intel_i915_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, - .chipset_flush = intel_i915_chipset_flush, -#ifdef USE_PCI_DMA_API - .agp_map_page = intel_agp_map_page, - .agp_unmap_page = intel_agp_unmap_page, - .agp_map_memory = intel_agp_map_memory, - .agp_unmap_memory = intel_agp_unmap_memory, -#endif +/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of + * driver and gmch_driver must be non-null, and find_gmch will determine + * which one should be used if a gmch_chip_id is present. 
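
Taken together, the struct intel_gtt_driver tables above form the whole per-generation hardware interface: the common code only ever touches the chipset through the .setup/.cleanup/.write_entry/.check_flags/.chipset_flush hooks. A hedged sketch of how a generic bind loop dispatches through such a table (hypothetical helper, not code from this patch; the real insert path is in intel_fake_agp_insert_entries(), not shown in this hunk):

	/* 'driver' is the table selected at probe time (intel_private.driver). */
	static int bind_pages(const struct intel_gtt_driver *driver,
			      dma_addr_t *pages, unsigned int first,
			      unsigned int count, unsigned int flags)
	{
		unsigned int i;

		/* i81x has no check_flags hook, so guard the call */
		if (driver->check_flags && !driver->check_flags(flags))
			return -EINVAL;

		for (i = 0; i < count; i++)
			driver->write_entry(pages[i], first + i, flags);

		if (driver->chipset_flush)
			driver->chipset_flush();
		return 0;
	}
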
+ */ +static const struct intel_gtt_driver_description { + unsigned int gmch_chip_id; + char *name; + const struct agp_bridge_driver *gmch_driver; + const struct intel_gtt_driver *gtt_driver; +} intel_gtt_chipsets[] = { + { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver, + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver, + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver, + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver, + &i81x_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", + &intel_fake_agp_driver, &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", + &intel_fake_agp_driver, &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82854_IG, "854", + &intel_fake_agp_driver, &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", + &intel_fake_agp_driver, &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_82865_IG, "865", + &intel_fake_agp_driver, &i8xx_gtt_driver}, + { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", + &intel_fake_agp_driver, &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", + &intel_fake_agp_driver, &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", + &intel_fake_agp_driver, &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", + &intel_fake_agp_driver, &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", + &intel_fake_agp_driver, &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", + &intel_fake_agp_driver, &i915_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", + &intel_fake_agp_driver, &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", + &intel_fake_agp_driver, &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", + &intel_fake_agp_driver, &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", + &intel_fake_agp_driver, &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", + &intel_fake_agp_driver, &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", + &intel_fake_agp_driver, &i965_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G33_IG, "G33", + &intel_fake_agp_driver, &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", + &intel_fake_agp_driver, &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", + &intel_fake_agp_driver, &g33_gtt_driver }, + { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", + &intel_fake_agp_driver, &pineview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", + &intel_fake_agp_driver, &pineview_gtt_driver }, + { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", + &intel_fake_agp_driver, &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", + &intel_fake_agp_driver, &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", + &intel_fake_agp_driver, &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", + &intel_fake_agp_driver, &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_B43_IG, "B43", + &intel_fake_agp_driver, &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", + &intel_fake_agp_driver, &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_G41_IG, "G41", + &intel_fake_agp_driver, &g4x_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, + "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, + { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, + "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, + "Sandybridge", &intel_fake_agp_driver, 
&sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, + "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + { 0, NULL, NULL } }; -static const struct agp_bridge_driver intel_gen6_driver = { - .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, - .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, - .needs_scratch_page = true, - .configure = intel_i9xx_configure, - .fetch_size = intel_i9xx_fetch_size, - .cleanup = intel_i915_cleanup, - .mask_memory = intel_gen6_mask_memory, - .masks = intel_gen6_masks, - .agp_enable = intel_i810_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_i965_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, - .insert_memory = intel_i915_insert_entries, - .remove_memory = intel_i915_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_gen6_type_to_mask_type, - .chipset_flush = intel_i915_chipset_flush, -#ifdef USE_PCI_DMA_API - .agp_map_page = intel_agp_map_page, - .agp_unmap_page = intel_agp_unmap_page, - .agp_map_memory = intel_agp_map_memory, - .agp_unmap_memory = intel_agp_unmap_memory, -#endif -}; +static int find_gmch(u16 device) +{ + struct pci_dev *gmch_device; -static const struct agp_bridge_driver intel_g33_driver = { - .owner = THIS_MODULE, - .aperture_sizes = intel_i830_sizes, - .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 4, - .needs_scratch_page = true, - .configure = intel_i9xx_configure, - .fetch_size = intel_i9xx_fetch_size, - .cleanup = intel_i915_cleanup, - .mask_memory = intel_i965_mask_memory, - .masks = intel_i810_masks, - .agp_enable = intel_i810_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_i915_create_gatt_table, - .free_gatt_table = intel_i830_free_gatt_table, - .insert_memory = intel_i915_insert_entries, - .remove_memory = intel_i915_remove_entries, - .alloc_by_type = intel_i830_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = intel_i830_type_to_mask_type, - .chipset_flush = intel_i915_chipset_flush, -#ifdef USE_PCI_DMA_API - .agp_map_page = intel_agp_map_page, - .agp_unmap_page = intel_agp_unmap_page, - .agp_map_memory = intel_agp_map_memory, - .agp_unmap_memory = intel_agp_unmap_memory, -#endif -}; + gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL); + if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) { + gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, + device, gmch_device); + } + + if (!gmch_device) + return 0; + + intel_private.pcidev = gmch_device; + return 1; +} + +int intel_gmch_probe(struct pci_dev *pdev, + struct agp_bridge_data *bridge) +{ 
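	/*
	 * Look the GMCH up in intel_gtt_chipsets[] below; a match installs
	 * both the fake-AGP bridge driver and the per-generation GTT driver
	 * before any hardware setup runs.
	 */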
+ int i, mask; + bridge->driver = NULL; + + for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { + if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { + bridge->driver = + intel_gtt_chipsets[i].gmch_driver; + intel_private.driver = + intel_gtt_chipsets[i].gtt_driver; + break; + } + } + + if (!bridge->driver) + return 0; + + bridge->dev_private_data = &intel_private; + bridge->dev = pdev; + + intel_private.bridge_dev = pci_dev_get(pdev); + + dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name); + + mask = intel_private.driver->dma_mask_size; + if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) + dev_err(&intel_private.pcidev->dev, + "set gfx device dma mask %d-bit failed!\n", mask); + else + pci_set_consistent_dma_mask(intel_private.pcidev, + DMA_BIT_MASK(mask)); + + if (bridge->driver == &intel_810_driver) + return 1; + + if (intel_gtt_init() != 0) + return 0; + + return 1; +} +EXPORT_SYMBOL(intel_gmch_probe); + +struct intel_gtt *intel_gtt_get(void) +{ + return &intel_private.base; +} +EXPORT_SYMBOL(intel_gtt_get); + +void intel_gmch_remove(struct pci_dev *pdev) +{ + if (intel_private.pcidev) + pci_dev_put(intel_private.pcidev); + if (intel_private.bridge_dev) + pci_dev_put(intel_private.bridge_dev); +} +EXPORT_SYMBOL(intel_gmch_remove); + +MODULE_AUTHOR("Dave Jones "); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index 30879df3daea..cc9277885dd0 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -1 +1 @@ -obj-y += drm/ vga/ +obj-y += drm/ vga/ stub/ diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index f3a23a329f4e..997c43d04909 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -5,7 +5,7 @@ ccflags-y := -Iinclude/drm drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ - drm_context.o drm_dma.o drm_drawable.o \ + drm_context.o drm_dma.o \ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index ba38e0147220..252fdb98b73a 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -193,7 +193,7 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data, * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired, allocates the - * memory via alloc_agp() and creates a drm_agp_mem entry for it. + * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it. 
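
With the drm_agp_allocate_memory()/drm_agp_bind_memory() shims deleted in the hunks below, DRM now calls the agp core directly. A condensed sketch of the resulting allocate-then-bind flow (hypothetical helper; assumes the AGP device is present and acquired):

	static struct agp_memory *alloc_and_bind(struct drm_device *dev,
						 size_t pages, u32 type,
						 off_t pg_start)
	{
		struct agp_memory *mem;

		mem = agp_allocate_memory(dev->agp->bridge, pages, type);
		if (!mem)
			return NULL;

		/* bind at a GTT page offset; unwind the allocation on failure */
		if (agp_bind_memory(mem, pg_start)) {
			agp_free_memory(mem);
			return NULL;
		}
		return mem;
	}
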
*/ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { @@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; type = (u32) request->type; - if (!(memory = drm_alloc_agp(dev, pages, type))) { + if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { kfree(entry); return -ENOMEM; } @@ -423,38 +423,6 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) return head; } -/** Calls agp_allocate_memory() */ -DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge, - size_t pages, u32 type) -{ - return agp_allocate_memory(bridge, pages, type); -} - -/** Calls agp_free_memory() */ -int drm_agp_free_memory(DRM_AGP_MEM * handle) -{ - if (!handle) - return 0; - agp_free_memory(handle); - return 1; -} - -/** Calls agp_bind_memory() */ -int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start) -{ - if (!handle) - return -EINVAL; - return agp_bind_memory(handle, start); -} - -/** Calls agp_unbind_memory() */ -int drm_agp_unbind_memory(DRM_AGP_MEM * handle) -{ - if (!handle) - return -EINVAL; - return agp_unbind_memory(handle); -} - /** * Binds a collection of pages into AGP memory at the given offset, returning * the AGP memory structure containing them. @@ -474,7 +442,7 @@ drm_agp_bind_pages(struct drm_device *dev, DRM_DEBUG("\n"); - mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages, + mem = agp_allocate_memory(dev->agp->bridge, num_pages, type); if (mem == NULL) { DRM_ERROR("Failed to allocate memory for %ld pages\n", @@ -487,7 +455,7 @@ drm_agp_bind_pages(struct drm_device *dev, mem->page_count = num_pages; mem->is_flushed = true; - ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE); + ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE); if (ret != 0) { DRM_ERROR("Failed to bind AGP memory: %d\n", ret); agp_free_memory(mem); diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 2607753a320b..6d440fb894cf 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -333,14 +333,6 @@ int drm_addctx(struct drm_device *dev, void *data, return -ENOMEM; } - if (ctx->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_ctor) - if (!dev->driver->context_ctor(dev, ctx->handle)) { - DRM_DEBUG("Running out of ctxs or memory.\n"); - return -ENOMEM; - } - } - ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); if (!ctx_entry) { DRM_DEBUG("out of memory\n"); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 37e0b4fa482a..6985cb1da72c 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1854,7 +1854,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, } if (fb->funcs->dirty) { - ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips); + ret = fb->funcs->dirty(fb, file_priv, flags, r->color, + clips, num_clips); } else { ret = -ENOSYS; goto out_err2; diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 677b275fa721..9d8c892d07c9 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -48,7 +48,6 @@ static struct drm_info_list drm_debugfs_list[] = { {"queues", drm_queues_info, 0}, {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, - {"gem_objects", drm_gem_object_info, DRIVER_GEM}, #if DRM_DEBUG_CODE {"vma", drm_vma_info, 0}, #endif diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c deleted file mode 100644 index c53c9768cc11..000000000000 --- 
a/drivers/gpu/drm/drm_drawable.c +++ /dev/null @@ -1,198 +0,0 @@ -/** - * \file drm_drawable.c - * IOCTLs for drawables - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - * \author Michel Dänzer - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include "drmP.h" - -/** - * Allocate drawable ID and memory to store information about it. - */ -int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - unsigned long irqflags; - struct drm_draw *draw = data; - int new_id = 0; - int ret; - -again: - if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { - DRM_ERROR("Out of memory expanding drawable idr\n"); - return -ENOMEM; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id); - if (ret == -EAGAIN) { - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - goto again; - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - draw->handle = new_id; - - DRM_DEBUG("%d\n", draw->handle); - - return 0; -} - -/** - * Free drawable ID and memory to store information about it. 
- */ -int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_draw *draw = data; - unsigned long irqflags; - struct drm_drawable_info *info; - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - info = drm_get_drawable_info(dev, draw->handle); - if (info == NULL) { - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - return -EINVAL; - } - kfree(info->rects); - kfree(info); - - idr_remove(&dev->drw_idr, draw->handle); - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_DEBUG("%d\n", draw->handle); - return 0; -} - -int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_update_draw *update = data; - unsigned long irqflags; - struct drm_clip_rect *rects; - struct drm_drawable_info *info; - int err; - - info = idr_find(&dev->drw_idr, update->handle); - if (!info) { - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; - if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { - DRM_ERROR("No such drawable %d\n", update->handle); - kfree(info); - return -EINVAL; - } - } - - switch (update->type) { - case DRM_DRAWABLE_CLIPRECTS: - if (update->num == 0) - rects = NULL; - else if (update->num != info->num_rects) { - rects = kmalloc(update->num * - sizeof(struct drm_clip_rect), - GFP_KERNEL); - } else - rects = info->rects; - - if (update->num && !rects) { - DRM_ERROR("Failed to allocate cliprect memory\n"); - err = -ENOMEM; - goto error; - } - - if (update->num && DRM_COPY_FROM_USER(rects, - (struct drm_clip_rect __user *) - (unsigned long)update->data, - update->num * - sizeof(*rects))) { - DRM_ERROR("Failed to copy cliprects from userspace\n"); - err = -EFAULT; - goto error; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - if (rects != info->rects) { - kfree(info->rects); - } - - info->rects = rects; - info->num_rects = update->num; - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - DRM_DEBUG("Updated %d cliprects for drawable %d\n", - info->num_rects, update->handle); - break; - default: - DRM_ERROR("Invalid update type %d\n", update->type); - return -EINVAL; - } - - return 0; - -error: - if (rects != info->rects) - kfree(rects); - - return err; -} - -/** - * Caller must hold the drawable spinlock! 
- */ -struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) -{ - return idr_find(&dev->drw_idr, id); -} -EXPORT_SYMBOL(drm_get_drawable_info); - -static int drm_drawable_free(int idr, void *p, void *data) -{ - struct drm_drawable_info *info = p; - - if (info) { - kfree(info->rects); - kfree(info); - } - - return 0; -} - -void drm_drawable_free_all(struct drm_device *dev) -{ - idr_for_each(&dev->drw_idr, drm_drawable_free, NULL); - idr_remove_all(&dev->drw_idr); -} diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index ff6690f4fc87..271835a71570 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -91,8 +91,8 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), @@ -127,7 +127,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), @@ -180,10 +180,6 @@ int drm_lastclose(struct drm_device * dev) mutex_lock(&dev->struct_mutex); - /* Free drawable information memory */ - drm_drawable_free_all(dev); - del_timer(&dev->timer); - /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp && !drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 96e963108225..c1a26217a530 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -30,7 +30,6 @@ #include #include #include -#include #include "drmP.h" #include "drm_edid.h" #include "drm_edid_modes.h" @@ -1268,7 +1267,35 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, } #define HDMI_IDENTIFIER 0x000C03 +#define AUDIO_BLOCK 0x01 #define VENDOR_BLOCK 0x03 +#define EDID_BASIC_AUDIO (1 << 6) + +/** + * Search EDID for CEA extension block. + */ +static u8 *drm_find_cea_extension(struct edid *edid) +{ + u8 *edid_ext = NULL; + int i; + + /* No EDID or EDID extensions */ + if (edid == NULL || edid->extensions == 0) + return NULL; + + /* Find CEA extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); + if (edid_ext[0] == CEA_EXT) + break; + } + + if (i == edid->extensions) + return NULL; + + return edid_ext; +} + /** * drm_detect_hdmi_monitor - detect whether monitor is hdmi. 
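
drm_detect_hdmi_monitor() below and the new drm_detect_monitor_audio() both walk the CEA structure set up by drm_find_cea_extension() above: extension blocks are EDID_LENGTH (128-byte) pages following the base block, and each CEA data block packs its tag into the top 3 bits of its header byte and its payload length into the low 5 bits. A standalone illustration of that header decode:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical data block header: tag 3 (vendor), 5-byte payload */
		uint8_t header = (0x03 << 5) | 0x05;

		printf("tag %u, len %u\n", header >> 5, header & 0x1f);
		/* matching the kernel's stride: i += (edid_ext[i] & 0x1f) + 1 */
		return 0;
	}
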
* @edid: monitor EDID information @@ -1278,24 +1305,13 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, */ bool drm_detect_hdmi_monitor(struct edid *edid) { - char *edid_ext = NULL; + u8 *edid_ext; int i, hdmi_id; int start_offset, end_offset; bool is_hdmi = false; - /* No EDID or EDID extensions */ - if (edid == NULL || edid->extensions == 0) - goto end; - - /* Find CEA extension */ - for (i = 0; i < edid->extensions; i++) { - edid_ext = (char *)edid + EDID_LENGTH * (i + 1); - /* This block is CEA extension */ - if (edid_ext[0] == 0x02) - break; - } - - if (i == edid->extensions) + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) goto end; /* Data block offset in CEA extension block */ @@ -1325,6 +1341,53 @@ end: } EXPORT_SYMBOL(drm_detect_hdmi_monitor); +/** + * drm_detect_monitor_audio - check monitor audio capability + * + * Monitor should have CEA extension block. + * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic + * audio' only. If there is any audio extension block and supported + * audio format, assume at least 'basic audio' support, even if 'basic + * audio' is not defined in EDID. + * + */ +bool drm_detect_monitor_audio(struct edid *edid) +{ + u8 *edid_ext; + int i, j; + bool has_audio = false; + int start_offset, end_offset; + + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) + goto end; + + has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); + + if (has_audio) { + DRM_DEBUG_KMS("Monitor has basic audio support\n"); + goto end; + } + + /* Data block offset in CEA extension block */ + start_offset = 4; + end_offset = edid_ext[2]; + + for (i = start_offset; i < end_offset; + i += ((edid_ext[i] & 0x1f) + 1)) { + if ((edid_ext[i] >> 5) == AUDIO_BLOCK) { + has_audio = true; + for (j = 1; j < (edid_ext[i] & 0x1f); j += 3) + DRM_DEBUG_KMS("CEA audio format %d\n", + (edid_ext[i + j] >> 3) & 0xf); + goto end; + } + } +end: + return has_audio; +} +EXPORT_SYMBOL(drm_detect_monitor_audio); + /** * drm_add_edid_modes - add modes from EDID data, if available * @connector: connector we're probing diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 6a5e403f9aa1..d2849e4ea4d0 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -242,6 +242,30 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) return 0; } +static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper) +{ + uint16_t *r_base, *g_base, *b_base; + int i; + + r_base = crtc->gamma_store; + g_base = r_base + crtc->gamma_size; + b_base = g_base + crtc->gamma_size; + + for (i = 0; i < crtc->gamma_size; i++) + helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i); +} + +static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) +{ + uint16_t *r_base, *g_base, *b_base; + + r_base = crtc->gamma_store; + g_base = r_base + crtc->gamma_size; + b_base = g_base + crtc->gamma_size; + + crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); +} + int drm_fb_helper_debug_enter(struct fb_info *info) { struct drm_fb_helper *helper = info->par; @@ -260,11 +284,12 @@ int drm_fb_helper_debug_enter(struct fb_info *info) continue; funcs = mode_set->crtc->helper_private; + drm_fb_helper_save_lut_atomic(mode_set->crtc, helper); funcs->mode_set_base_atomic(mode_set->crtc, mode_set->fb, mode_set->x, - mode_set->y); - + mode_set->y, + ENTER_ATOMIC_MODE_SET); } } @@ -308,8 +333,9 @@ int drm_fb_helper_debug_leave(struct fb_info 
*info) continue; } + drm_fb_helper_restore_lut_atomic(mode_set->crtc); funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, - crtc->y); + crtc->y, LEAVE_ATOMIC_MODE_SET); } return 0; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 5663d2719063..ea1c4b019ebf 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -92,12 +92,6 @@ drm_gem_init(struct drm_device *dev) spin_lock_init(&dev->object_name_lock); idr_init(&dev->object_name_idr); - atomic_set(&dev->object_count, 0); - atomic_set(&dev->object_memory, 0); - atomic_set(&dev->pin_count, 0); - atomic_set(&dev->pin_memory, 0); - atomic_set(&dev->gtt_count, 0); - atomic_set(&dev->gtt_memory, 0); mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); if (!mm) { @@ -151,9 +145,6 @@ int drm_gem_object_init(struct drm_device *dev, atomic_set(&obj->handle_count, 0); obj->size = size; - atomic_inc(&dev->object_count); - atomic_add(obj->size, &dev->object_memory); - return 0; } EXPORT_SYMBOL(drm_gem_object_init); @@ -180,8 +171,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) return obj; fput: /* Object_init mangles the global counters - readjust them. */ - atomic_dec(&dev->object_count); - atomic_sub(obj->size, &dev->object_memory); fput(obj->filp); free: kfree(obj); @@ -436,10 +425,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private) void drm_gem_object_release(struct drm_gem_object *obj) { - struct drm_device *dev = obj->dev; fput(obj->filp); - atomic_dec(&dev->object_count); - atomic_sub(obj->size, &dev->object_memory); } EXPORT_SYMBOL(drm_gem_object_release); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 974e970ce3f8..3cdbaf379bb5 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -270,20 +270,6 @@ int drm_gem_name_info(struct seq_file *m, void *data) return 0; } -int drm_gem_object_info(struct seq_file *m, void* data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - - seq_printf(m, "%d objects\n", atomic_read(&dev->object_count)); - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory)); - seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count)); - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory)); - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory)); - seq_printf(m, "%d gtt total\n", dev->gtt_total); - return 0; -} - #if DRM_DEBUG_CODE int drm_vma_info(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 9bf93bc9a32c..632ae243ede0 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -37,6 +37,8 @@ static int drm_notifier(void *priv); +static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); + /** * Lock ioctl. 
* @@ -124,9 +126,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); } - if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) - dev->driver->dma_ready(dev); - if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) { if (dev->driver->dma_quiescent(dev)) { @@ -136,12 +135,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) } } - if (dev->driver->kernel_context_switch && - dev->last_context != lock->context) { - dev->driver->kernel_context_switch(dev, dev->last_context, - lock->context); - } - return 0; } @@ -169,15 +162,8 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); - /* kernel_context_switch isn't used by any of the x86 drm - * modules but is required by the Sparc driver. - */ - if (dev->driver->kernel_context_switch_unlock) - dev->driver->kernel_context_switch_unlock(dev); - else { - if (drm_lock_free(&master->lock, lock->context)) { - /* FIXME: Should really bail out here. */ - } + if (drm_lock_free(&master->lock, lock->context)) { + /* FIXME: Should really bail out here. */ } unblock_all_signals(); @@ -193,6 +179,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) * * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. */ +static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { @@ -229,7 +216,6 @@ int drm_lock_take(struct drm_lock_data *lock_data, } return 0; } -EXPORT_SYMBOL(drm_lock_take); /** * This takes a lock forcibly and hands it to context. Should ONLY be used @@ -297,7 +283,6 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) wake_up_interruptible(&lock_data->lock_queue); return 0; } -EXPORT_SYMBOL(drm_lock_free); /** * If we get here, it means that the process has called DRM_IOCTL_LOCK @@ -360,7 +345,6 @@ void drm_idlelock_take(struct drm_lock_data *lock_data) } spin_unlock_bh(&lock_data->spinlock); } -EXPORT_SYMBOL(drm_idlelock_take); void drm_idlelock_release(struct drm_lock_data *lock_data) { @@ -380,8 +364,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data) } spin_unlock_bh(&lock_data->spinlock); } -EXPORT_SYMBOL(drm_idlelock_release); - int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) { @@ -390,5 +372,3 @@ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && master->lock.file_priv == file_priv); } - -EXPORT_SYMBOL(drm_i_have_hw_lock); diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 7732268eced2..c9b805000a11 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -99,29 +99,23 @@ static void *agp_remap(unsigned long offset, unsigned long size, return addr; } -/** Wrapper around agp_allocate_memory() */ -DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type) -{ - return drm_agp_allocate_memory(dev->agp->bridge, pages, type); -} - /** Wrapper around agp_free_memory() */ -int drm_free_agp(DRM_AGP_MEM * handle, int pages) +void drm_free_agp(DRM_AGP_MEM * handle, int pages) { - return drm_agp_free_memory(handle) ? 
0 : -EINVAL; + agp_free_memory(handle); } EXPORT_SYMBOL(drm_free_agp); /** Wrapper around agp_bind_memory() */ int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) { - return drm_agp_bind_memory(handle, start); + return agp_bind_memory(handle, start); } /** Wrapper around agp_unbind_memory() */ int drm_unbind_agp(DRM_AGP_MEM * handle) { - return drm_agp_unbind_memory(handle); + return agp_unbind_memory(handle); } EXPORT_SYMBOL(drm_unbind_agp); diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index a9ba6b69ad35..9e5b07efebb7 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c @@ -55,7 +55,6 @@ static struct drm_info_list drm_proc_list[] = { {"queues", drm_queues_info, 0}, {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, - {"gem_objects", drm_gem_object_info, DRIVER_GEM}, #if DRM_DEBUG_CODE {"vma", drm_vma_info, 0}, #endif @@ -151,7 +150,6 @@ fail: int drm_proc_init(struct drm_minor *minor, int minor_id, struct proc_dir_entry *root) { - struct drm_device *dev = minor->dev; char name[64]; int ret; @@ -172,14 +170,6 @@ int drm_proc_init(struct drm_minor *minor, int minor_id, return ret; } - if (dev->driver->proc_init) { - ret = dev->driver->proc_init(minor); - if (ret) { - DRM_ERROR("DRM: Driver failed to initialize " - "/proc/dri.\n"); - return ret; - } - } return 0; } @@ -216,15 +206,11 @@ int drm_proc_remove_files(struct drm_info_list *files, int count, */ int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) { - struct drm_device *dev = minor->dev; char name[64]; if (!root || !minor->proc_root) return 0; - if (dev->driver->proc_cleanup) - dev->driver->proc_cleanup(minor); - drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor); sprintf(name, "%d", minor->index); diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index 9034c4c6100d..d15e09b0ae0b 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -184,8 +184,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) drm_sg_cleanup(entry); return -ENOMEM; } -EXPORT_SYMBOL(drm_sg_alloc); - int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index d1ad57450df1..cdc89ee042cc 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -240,14 +240,10 @@ int drm_fill_in_dev(struct drm_device *dev, INIT_LIST_HEAD(&dev->vblank_event_list); spin_lock_init(&dev->count_lock); - spin_lock_init(&dev->drw_lock); spin_lock_init(&dev->event_lock); - init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); - idr_init(&dev->drw_idr); - if (drm_ht_create(&dev->map_hash, 12)) { return -ENOMEM; } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 5df450683aab..2c3fcbdfd8ff 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -523,14 +523,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) return 0; } -resource_size_t drm_core_get_map_ofs(struct drm_local_map * map) -{ - return map->offset; -} - -EXPORT_SYMBOL(drm_core_get_map_ofs); - -resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) +static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) { #ifdef __alpha__ return dev->hose->dense_mem_base - dev->hose->mem_space->start; @@ -539,8 +532,6 @@ resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) #endif } -EXPORT_SYMBOL(drm_core_get_reg_ofs); - 
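
Further down, the dvo_ch7017/dvo_ch7xxx/dvo_ivch/dvo_sil164/dvo_tfp410 hunks all make the same change: instead of digging an intel_i2c_chan back out of the adapter with container_of(), the encoders now hand dvo->i2c_bus straight to i2c_transfer(). The register-read idiom they share, as a self-contained sketch (essentially the new ch7017_read()):

	/* Read one register: write the register address, then read one byte back. */
	static bool dvo_readb(struct i2c_adapter *adapter, unsigned short slave,
			      u8 addr, u8 *val)
	{
		struct i2c_msg msgs[] = {
			{ .addr = slave, .flags = 0,        .len = 1, .buf = &addr },
			{ .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = val  },
		};

		return i2c_transfer(adapter, msgs, 2) == 2;
	}
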
/** * mmap DMA memory. * @@ -627,7 +618,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) #endif case _DRM_FRAME_BUFFER: case _DRM_REGISTERS: - offset = dev->driver->get_reg_ofs(dev); + offset = drm_core_get_reg_ofs(dev); vma->vm_flags |= VM_IO; /* not in core dump */ vma->vm_page_prot = drm_io_prot(map->type, vma); #if !defined(__arm__) diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index fe69914ce507..88bcd331e7c5 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -52,8 +52,6 @@ static struct drm_driver driver = { .device_is_agp = i810_driver_device_is_agp, .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked, .dma_quiescent = i810_driver_dma_quiescent, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = i810_ioctls, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c index 5b6298b24e24..f655ab7977da 100644 --- a/drivers/gpu/drm/i830/i830_drv.c +++ b/drivers/gpu/drm/i830/i830_drv.c @@ -57,8 +57,6 @@ static struct drm_driver driver = { .device_is_agp = i830_driver_device_is_agp, .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked, .dma_quiescent = i830_driver_dma_quiescent, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, #if USE_IRQS .irq_preinstall = i830_driver_irq_preinstall, .irq_postinstall = i830_driver_irq_postinstall, diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 5c8e53458edb..fdc833d5cc7b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ intel_dvo.o \ intel_ringbuffer.o \ intel_overlay.o \ + intel_opregion.o \ dvo_ch7xxx.o \ dvo_ch7017.o \ dvo_ivch.o \ dvo_tfp410.o \ dvo_sil164.o -i915-$(CONFIG_ACPI) += i915_opregion.o i915-$(CONFIG_COMPAT) += i915_ioc32.o +i915-$(CONFIG_ACPI) += intel_acpi.o + obj-$(CONFIG_DRM_I915) += i915.o CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 14d59804acd7..af70337567ce 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -165,67 +165,44 @@ struct ch7017_priv { static void ch7017_dump_regs(struct intel_dvo_device *dvo); static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); -static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) +static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) { - struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); - u8 out_buf[2]; - u8 in_buf[2]; - struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, - .buf = out_buf, + .buf = &addr, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, - .buf = in_buf, + .buf = val, } }; - - out_buf[0] = addr; - out_buf[1] = 0; - - if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { - *val= in_buf[0]; - return true; - }; - - return false; + return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2; } -static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) +static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val) { - struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); - uint8_t out_buf[2]; + uint8_t buf[2] = { addr, val }; struct i2c_msg msg = { .addr 
= dvo->slave_addr, .flags = 0, .len = 2, - .buf = out_buf, + .buf = buf, }; - - out_buf[0] = addr; - out_buf[1] = val; - - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) - return true; - - return false; + return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1; } /** Probes for a CH7017 on the given bus and slave address. */ static bool ch7017_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); struct ch7017_priv *priv; - uint8_t val; + const char *str; + u8 val; priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL); if (priv == NULL) @@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo, if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) goto fail; - if (val != CH7017_DEVICE_ID_VALUE && - val != CH7018_DEVICE_ID_VALUE && - val != CH7019_DEVICE_ID_VALUE) { + switch (val) { + case CH7017_DEVICE_ID_VALUE: + str = "ch7017"; + break; + case CH7018_DEVICE_ID_VALUE: + str = "ch7018"; + break; + case CH7019_DEVICE_ID_VALUE: + str = "ch7019"; + break; + default: DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " - "Slave %d.\n", - val, i2cbus->adapter.name,dvo->slave_addr); + "slave %d.\n", + val, adapter->name,dvo->slave_addr); goto fail; } + DRM_DEBUG_KMS("%s detected on %s, addr %d\n", + str, adapter->name, dvo->slave_addr); return true; + fail: kfree(priv); return false; @@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) } /* XXX: Should actually wait for update power status somehow */ - udelay(20000); + msleep(20); } static void ch7017_dump_regs(struct intel_dvo_device *dvo) diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 6f1944b24441..7eaa94e4ff06 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct ch7xxx_priv *ch7xxx= dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; @@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) out_buf[0] = addr; out_buf[1] = 0; - if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { + if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; }; if (!ch7xxx->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; } @@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!ch7xxx->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index a2ec3f487202..a12ed9414cc7 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ 
b/drivers/gpu/drm/i915/dvo_ivch.c @@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) { struct ivch_priv *priv = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[1]; u8 in_buf[2]; @@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) out_buf[0] = addr; - if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) { + if (i2c_transfer(adapter, msgs, 3) == 3) { *data = (in_buf[1] << 8) | in_buf[0]; return true; }; @@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) if (!priv->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from " "%s:%02x.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; } @@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) { struct ivch_priv *priv = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[3]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) out_buf[1] = data & 0xff; out_buf[2] = data >> 8; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!priv->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 9b8e6765cf26..e4b4091df942 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct sil164_priv *sil = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; @@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) out_buf[0] = addr; out_buf[1] = 0; - if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { + if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; }; if (!sil->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; } @@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct sil164_priv *sil= dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!sil->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 56f66426207f..8ab2855bb544 100644 --- 
a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; @@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) out_buf[0] = addr; out_buf[1] = 0; - if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) { + if (i2c_transfer(adapter, msgs, 2) == 2) { *ch = in_buf[0]; return true; }; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; } @@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, @@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1) + if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, dvo->slave_addr); + addr, adapter->name, dvo->slave_addr); } return false; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 048149748fdc..1f4f3ceb63c7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -40,9 +40,51 @@ #if defined(CONFIG_DEBUG_FS) -#define ACTIVE_LIST 1 -#define FLUSHING_LIST 2 -#define INACTIVE_LIST 3 +enum { + ACTIVE_LIST, + FLUSHING_LIST, + INACTIVE_LIST, + PINNED_LIST, + DEFERRED_FREE_LIST, +}; + +static const char *yesno(int v) +{ + return v ? "yes" : "no"; +} + +static int i915_capabilities(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + const struct intel_device_info *info = INTEL_INFO(dev); + + seq_printf(m, "gen: %d\n", info->gen); +#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) + B(is_mobile); + B(is_i85x); + B(is_i915g); + B(is_i945gm); + B(is_g33); + B(need_gfx_hws); + B(is_g4x); + B(is_pineview); + B(is_broadwater); + B(is_crestline); + B(has_fbc); + B(has_rc6); + B(has_pipe_cxsr); + B(has_hotplug); + B(cursor_needs_physical); + B(has_overlay); + B(overlay_needs_physical); + B(supports_tv); + B(has_bsd_ring); + B(has_blt_ring); +#undef B + + return 0; +} static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) { @@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) } } +static void +describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) +{ + seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s", + &obj->base, + get_pin_flag(obj), + get_tiling_flag(obj), + obj->base.size, + obj->base.read_domains, + obj->base.write_domain, + obj->last_rendering_seqno, + obj->dirty ? " dirty" : "", + obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); + if (obj->base.name) + seq_printf(m, " (name: %d)", obj->base.name); + if (obj->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", obj->fence_reg); + if (obj->gtt_space != NULL) + seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); + if (obj->ring != NULL) + seq_printf(m, " (%s)", obj->ring->name); +} + static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; - spinlock_t *lock = NULL; + size_t total_obj_size, total_gtt_size; + int count, ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; switch (list) { case ACTIVE_LIST: seq_printf(m, "Active:\n"); - lock = &dev_priv->mm.active_list_lock; - head = &dev_priv->render_ring.active_list; + head = &dev_priv->mm.active_list; break; case INACTIVE_LIST: seq_printf(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; break; + case PINNED_LIST: + seq_printf(m, "Pinned:\n"); + head = &dev_priv->mm.pinned_list; + break; case FLUSHING_LIST: seq_printf(m, "Flushing:\n"); head = &dev_priv->mm.flushing_list; break; + case DEFERRED_FREE_LIST: + seq_printf(m, "Deferred free:\n"); + head = &dev_priv->mm.deferred_free_list; + break; default: - DRM_INFO("Ooops, unexpected list\n"); - return 0; + mutex_unlock(&dev->struct_mutex); + return -EINVAL; } - if (lock) - spin_lock(lock); - list_for_each_entry(obj_priv, head, list) - { - seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s", - &obj_priv->base, - get_pin_flag(obj_priv), - obj_priv->base.size, - obj_priv->base.read_domains, - obj_priv->base.write_domain, - obj_priv->last_rendering_seqno, - obj_priv->dirty ? " dirty" : "", - obj_priv->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); - - if (obj_priv->base.name) - seq_printf(m, " (name: %d)", obj_priv->base.name); - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)", obj_priv->fence_reg); - if (obj_priv->gtt_space != NULL) - seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset); - + total_obj_size = total_gtt_size = count = 0; + list_for_each_entry(obj_priv, head, mm_list) { + seq_printf(m, " "); + describe_obj(m, obj_priv); seq_printf(m, "\n"); + total_obj_size += obj_priv->base.size; + total_gtt_size += obj_priv->gtt_space->size; + count++; } + mutex_unlock(&dev->struct_mutex); - if (lock) - spin_unlock(lock); + seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", + count, total_obj_size, total_gtt_size); return 0; } +static int i915_gem_object_info(struct seq_file *m, void* data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + seq_printf(m, "%u objects\n", dev_priv->mm.object_count); + seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); + seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); + seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); + seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); + seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); + seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + + static int i915_gem_pageflip_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_request *gem_request; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; seq_printf(m, "Request:\n"); list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, @@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data) gem_request->seqno, (int) (jiffies - gem_request->emitted_jiffies)); } + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", - i915_get_gem_seqno(dev, &dev_priv->render_ring)); + dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); } else { seq_printf(m, "Current sequence: hws uninitialized\n"); } seq_printf(m, "Waiter sequence: %d\n", dev_priv->mm.waiting_gem_seqno); seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; if (!HAS_PCH_SPLIT(dev)) { seq_printf(m, "Interrupt enable: %08x\n", @@ -247,7 
+356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) atomic_read(&dev_priv->irq_received)); if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", - i915_get_gem_seqno(dev, &dev_priv->render_ring)); + dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); } else { seq_printf(m, "Current sequence: hws uninitialized\n"); } @@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) dev_priv->mm.waiting_gem_seqno); seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int i; + int i, ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); @@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "\n"); } } + mutex_unlock(&dev->struct_mutex); return 0; } @@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data) return 0; } -static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count) +static void i915_dump_object(struct seq_file *m, + struct io_mapping *mapping, + struct drm_i915_gem_object *obj_priv) { - int page, i; - uint32_t *mem; + int page, page_count, i; + page_count = obj_priv->base.size / PAGE_SIZE; for (page = 0; page < page_count; page++) { - mem = kmap_atomic(pages[page], KM_USER0); + u32 *mem = io_mapping_map_wc(mapping, + obj_priv->gtt_offset + page * PAGE_SIZE); for (i = 0; i < PAGE_SIZE; i += 4) seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); - kunmap_atomic(mem, KM_USER0); + io_mapping_unmap(mem); } } @@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) struct drm_i915_gem_object *obj_priv; int ret; - spin_lock(&dev_priv->mm.active_list_lock); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; - list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, - list) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { obj = &obj_priv->base; if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { - ret = i915_gem_object_get_pages(obj, 0); - if (ret) { - DRM_ERROR("Failed to get pages: %d\n", ret); - spin_unlock(&dev_priv->mm.active_list_lock); - return ret; - } - - seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset); - i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE); - - i915_gem_object_put_pages(obj); + seq_printf(m, "--- gtt_offset = 0x%08x\n", + obj_priv->gtt_offset); + i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv); } } - spin_unlock(&dev_priv->mm.active_list_lock); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u8 *virt; - uint32_t *ptr, off; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; if (!dev_priv->render_ring.gem_object) { seq_printf(m, "No ringbuffer setup\n"); - return 0; - } + } else { + u8 *virt = 
dev_priv->render_ring.virtual_start; + uint32_t off; - virt = dev_priv->render_ring.virtual_start; - - for (off = 0; off < dev_priv->render_ring.size; off += 4) { - ptr = (uint32_t *)(virt + off); - seq_printf(m, "%08x : %08x\n", off, *ptr); + for (off = 0; off < dev_priv->render_ring.size; off += 4) { + uint32_t *ptr = (uint32_t *)(virt + off); + seq_printf(m, "%08x : %08x\n", off, *ptr); + } } + mutex_unlock(&dev->struct_mutex); return 0; } @@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) seq_printf(m, "RingHead : %08x\n", head); seq_printf(m, "RingTail : %08x\n", tail); seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); - seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD)); + seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD)); return 0; } @@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { seq_printf(m, " INSTPS: 0x%08x\n", error->instps); seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); } @@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } else { seq_printf(m, "FBC disabled: "); switch (dev_priv->no_fbc_reason) { + case FBC_NO_OUTPUT: + seq_printf(m, "no outputs"); + break; case FBC_STOLEN_TOO_SMALL: seq_printf(m, "not enough stolen memory"); break; @@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; bool sr_enabled = false; - if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev)) + if (IS_GEN5(dev)) + sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; + else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev)) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; else if (IS_PINEVIEW(dev)) sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; - seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" : - "disabled"); + seq_printf(m, "self-refresh: %s\n", + sr_enabled ? 
"enabled" : "disabled"); return 0; } @@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; unsigned long temp, chipset, gfx; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; temp = i915_mch_val(dev_priv); chipset = i915_chipset_val(dev_priv); gfx = i915_gfx_val(dev_priv); + mutex_unlock(&dev->struct_mutex); seq_printf(m, "GMCH temp: %ld\n", temp); seq_printf(m, "Chipset power: %ld\n", chipset); @@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused) return 0; } +static int i915_opregion(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + if (opregion->header) + seq_write(m, opregion->header, OPREGION_SIZE); + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_gem_framebuffer_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_fbdev *ifbdev; + struct intel_framebuffer *fb; + int ret; + + ret = mutex_lock_interruptible(&dev->mode_config.mutex); + if (ret) + return ret; + + ifbdev = dev_priv->fbdev; + fb = to_intel_framebuffer(ifbdev->helper.fb); + + seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", + fb->base.width, + fb->base.height, + fb->base.depth, + fb->base.bits_per_pixel); + describe_obj(m, to_intel_bo(fb->obj)); + seq_printf(m, "\n"); + + list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { + if (&fb->base == ifbdev->helper.fb) + continue; + + seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", + fb->base.width, + fb->base.height, + fb->base.depth, + fb->base.bits_per_pixel); + describe_obj(m, to_intel_bo(fb->obj)); + seq_printf(m, "\n"); + } + + mutex_unlock(&dev->mode_config.mutex); + + return 0; +} + static int i915_wedged_open(struct inode *inode, struct file *filp) @@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp, "wedged : %d\n", atomic_read(&dev_priv->mm.wedged)); + if (len > sizeof (buf)) + len = sizeof (buf); + return simple_read_from_buffer(ubuf, max, ppos, buf, len); } @@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp, atomic_set(&dev_priv->mm.wedged, val); if (val) { - DRM_WAKEUP(&dev_priv->irq_queue); + wake_up_all(&dev_priv->irq_queue); queue_work(dev_priv->wq, &dev_priv->error_work); } @@ -824,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) } static struct drm_info_list i915_debugfs_list[] = { + {"i915_capabilities", i915_capabilities, 0, 0}, + {"i915_gem_objects", i915_gem_object_info, 0}, {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, + {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, + {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST}, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, @@ -846,6 
+1042,8 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gfxec", i915_gfxec, 0}, {"i915_fbc_status", i915_fbc_status, 0}, {"i915_sr_status", i915_sr_status, 0}, + {"i915_opregion", i915_opregion, 0}, + {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2dd2c93ebfa3..7a26f4dd21ae 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -40,8 +40,7 @@ #include <linux/pnp.h> #include <linux/vga_switcheroo.h> #include <linux/slab.h> - -extern int intel_max_stolen; /* from AGP driver */ +#include <acpi/video.h> /** * Sets up the hardware status page for devices that need a physical address @@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev) memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & 0xf0; @@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev) mutex_lock(&dev->struct_mutex); intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ @@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev) DRM_DEBUG_DRIVER("hw status page @ %p\n", ring->status_page.page_addr); if (ring->status_page.gfx_addr != 0) - ring->setup_status_page(dev, ring); + intel_ring_setup_status_page(dev, ring); else I915_WRITE(HWS_PGA, dev_priv->dma_status_page); @@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev, return -EINVAL; } - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { BEGIN_LP_RING(4); OUT_RING(GFX_OP_DRAWRECT_INFO_I965); OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); @@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, if (!IS_I830(dev) && !IS_845G(dev)) { BEGIN_LP_RING(2); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); OUT_RING(batch->start); } else { @@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, } - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev)) { BEGIN_LP_RING(2); OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); OUT_RING(MI_NOOP); @@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_BSD: value = HAS_BSD(dev); break; + case I915_PARAM_HAS_BLT: + value = HAS_BLT(dev); + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -888,12 +890,12 @@ static int intel_alloc_mchbar_resource(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + int reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; int ret; - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); mchbar_addr = ((u64)temp_hi << 32) | temp_lo; @@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) return ret; } - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) pci_write_config_dword(dev_priv->bridge_dev, reg + 4, upper_32_bits(dev_priv->mch_res.start)); @@ -934,7 +936,7 @@ static void intel_setup_mchbar(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp; bool enabled; @@ -971,7 +973,7 @@ static void intel_teardown_mchbar(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp; if (dev_priv->mchbar_need_disable) { @@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev) release_resource(&dev_priv->mch_res); } -/** - * i915_probe_agp - get AGP bootup configuration - * @pdev: PCI device - * @aperture_size: returns AGP aperture configured size - * @preallocated_size: returns size of BIOS preallocated AGP space - * - * Since Intel integrated graphics are UMA, the BIOS has to set aside - * some RAM for the framebuffer at early boot. This code figures out - * how much was set aside so we can use it for our own purposes. - */ -static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, - uint32_t *preallocated_size, - uint32_t *start) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u16 tmp = 0; - unsigned long overhead; - unsigned long stolen; - - /* Get the fb aperture size and "stolen" memory amount. */ - pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp); - - *aperture_size = 1024 * 1024; - *preallocated_size = 1024 * 1024; - - switch (dev->pdev->device) { - case PCI_DEVICE_ID_INTEL_82830_CGC: - case PCI_DEVICE_ID_INTEL_82845G_IG: - case PCI_DEVICE_ID_INTEL_82855GM_IG: - case PCI_DEVICE_ID_INTEL_82865_IG: - if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) - *aperture_size *= 64; - else - *aperture_size *= 128; - break; - default: - /* 9xx supports large sizes, just look at the length */ - *aperture_size = pci_resource_len(dev->pdev, 2); - break; - } - - /* - * Some of the preallocated space is taken by the GTT - * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 
- */ - if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) - overhead = 4096; - else - overhead = (*aperture_size / 1024) + 4096; - - if (IS_GEN6(dev)) { - /* SNB has memory control reg at 0x50.w */ - pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp); - - switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) { - case INTEL_855_GMCH_GMS_DISABLED: - DRM_ERROR("video memory is disabled\n"); - return -1; - case SNB_GMCH_GMS_STOLEN_32M: - stolen = 32 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_64M: - stolen = 64 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_96M: - stolen = 96 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_128M: - stolen = 128 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_160M: - stolen = 160 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_192M: - stolen = 192 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_224M: - stolen = 224 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_256M: - stolen = 256 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_288M: - stolen = 288 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_320M: - stolen = 320 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_352M: - stolen = 352 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_384M: - stolen = 384 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_416M: - stolen = 416 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_448M: - stolen = 448 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_480M: - stolen = 480 * 1024 * 1024; - break; - case SNB_GMCH_GMS_STOLEN_512M: - stolen = 512 * 1024 * 1024; - break; - default: - DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", - tmp & SNB_GMCH_GMS_STOLEN_MASK); - return -1; - } - } else { - switch (tmp & INTEL_GMCH_GMS_MASK) { - case INTEL_855_GMCH_GMS_DISABLED: - DRM_ERROR("video memory is disabled\n"); - return -1; - case INTEL_855_GMCH_GMS_STOLEN_1M: - stolen = 1 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_4M: - stolen = 4 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_8M: - stolen = 8 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_16M: - stolen = 16 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_32M: - stolen = 32 * 1024 * 1024; - break; - case INTEL_915G_GMCH_GMS_STOLEN_48M: - stolen = 48 * 1024 * 1024; - break; - case INTEL_915G_GMCH_GMS_STOLEN_64M: - stolen = 64 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_128M: - stolen = 128 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_256M: - stolen = 256 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_96M: - stolen = 96 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_160M: - stolen = 160 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_224M: - stolen = 224 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_352M: - stolen = 352 * 1024 * 1024; - break; - default: - DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", - tmp & INTEL_GMCH_GMS_MASK); - return -1; - } - } - - *preallocated_size = stolen - overhead; - *start = overhead; - - return 0; -} - #define PTE_ADDRESS_MASK 0xfffff000 #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) @@ -1181,11 +1015,11 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, { unsigned long *gtt; unsigned long entry, phys; - int gtt_bar = IS_I9XX(dev) ? 0 : 1; + int gtt_bar = IS_GEN2(dev) ? 
1 : 0; int gtt_offset, gtt_size; - if (IS_I965G(dev)) { - if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { + if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) { gtt_offset = 2*1024*1024; gtt_size = 2*1024*1024; } else { @@ -1210,10 +1044,8 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); /* Mask out these reserved bits on this hardware. */ - if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || - IS_I945G(dev) || IS_I945GM(dev)) { + if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev)) entry &= ~PTE_ADDRESS_MASK_HIGH; - } /* If it's not a mapping type we know, then bail. */ if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && @@ -1252,7 +1084,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) unsigned long ll_base = 0; /* Leave 1M for line length buffer & misc. */ - compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); + compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0); if (!compressed_fb) { dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; i915_warn_stolen(dev); @@ -1273,7 +1105,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) } if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { - compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, + compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096, 4096, 0); if (!compressed_llb) { i915_warn_stolen(dev); @@ -1343,10 +1175,8 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ /* i915 resume handler doesn't set to D0 */ pci_set_power_state(dev->pdev, PCI_D0); i915_resume(dev); - drm_kms_helper_poll_enable(dev); } else { printk(KERN_ERR "i915: switched off\n"); - drm_kms_helper_poll_disable(dev); i915_suspend(dev, pmm); } } @@ -1363,23 +1193,14 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) } static int i915_load_modeset_init(struct drm_device *dev, - unsigned long prealloc_start, unsigned long prealloc_size, unsigned long agp_size) { struct drm_i915_private *dev_priv = dev->dev_private; - int fb_bar = IS_I9XX(dev) ? 2 : 0; int ret = 0; - dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) & - 0xff000000; - - /* Basic memrange allocator for stolen space (aka vram) */ - drm_mm_init(&dev_priv->vram, 0, prealloc_size); - DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); - - /* We're off and running w/KMS */ - dev_priv->mm.suspended = 0; + /* Basic memrange allocator for stolen space (aka mm.vram) */ + drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); /* Let GEM Manage from end of prealloc space to end of aperture. * @@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev, */ dev_priv->allow_batchbuffer = 1; - ret = intel_init_bios(dev); + ret = intel_parse_bios(dev); if (ret) DRM_INFO("failed to find VBIOS tables\n"); @@ -1423,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev, if (ret) goto cleanup_ringbuffer; + intel_register_dsm_handler(); + ret = vga_switcheroo_register_client(dev->pdev, i915_switcheroo_set_state, i915_switcheroo_can_switch); @@ -1443,17 +1266,15 @@ static int i915_load_modeset_init(struct drm_device *dev, /* FIXME: do pre/post-mode set stuff in core KMS code */ dev->vblank_disable_allowed = 1; - /* - * Initialize the hardware status page IRQ location. 
- */ - - I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); - ret = intel_fbdev_init(dev); if (ret) goto cleanup_irq; drm_kms_helper_poll_init(dev); + + /* We're off and running w/KMS */ + dev_priv->mm.suspended = 0; + return 0; cleanup_irq: @@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev; * - dev_priv->fmax * - dev_priv->gpu_busy */ -DEFINE_SPINLOCK(mchdev_lock); +static DEFINE_SPINLOCK(mchdev_lock); /** * i915_read_mch_val - return value for IPS use @@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) struct drm_i915_private *dev_priv; resource_size_t base, size; int ret = 0, mmio_bar; - uint32_t agp_size, prealloc_size, prealloc_start; + uint32_t agp_size, prealloc_size; /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; @@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->info = (struct intel_device_info *) flags; /* Add register map (needed for suspend/resume) */ - mmio_bar = IS_I9XX(dev) ? 0 : 1; + mmio_bar = IS_GEN2(dev) ? 1 : 0; base = pci_resource_start(dev->pdev, mmio_bar); size = pci_resource_len(dev->pdev, mmio_bar); @@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) "performance may suffer.\n"); } - ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); - if (ret) + dev_priv->mm.gtt = intel_gtt_get(); + if (!dev_priv->mm.gtt) { + DRM_ERROR("Failed to initialize GTT\n"); + ret = -ENODEV; goto out_iomapfree; - - if (prealloc_size > intel_max_stolen) { - DRM_INFO("detected %dM stolen memory, trimming to %dM\n", - prealloc_size >> 20, intel_max_stolen >> 20); - prealloc_size = intel_max_stolen; } - dev_priv->wq = create_singlethread_workqueue("i915"); + prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; + agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; + + /* The i915 workqueue is primarily used for batched retirement of + * requests (and thus managing bo) once the task has been completed + * by the GPU. i915_gem_retire_requests() is called directly when we + * need high-priority retirement, such as waiting for an explicit + * bo. + * + * It is also used for periodic low-priority events, such as + * idle-timers and hangcheck. + * + * All tasks on the workqueue are expected to acquire the dev mutex + * so there is no point in running more than one instance of the + * workqueue at any time: max_active = 1 and NON_REENTRANT. 
+ */ + dev_priv->wq = alloc_workqueue("i915", + WQ_UNBOUND | WQ_NON_REENTRANT, + 1); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); ret = -ENOMEM; @@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); + intel_setup_gmbus(dev); + intel_opregion_setup(dev); + + /* Make sure the bios did its job and set up vital registers */ + intel_setup_bios(dev); i915_gem_load(dev); @@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_PINEVIEW(dev)) i915_pineview_get_mem_freq(dev); - else if (IS_IRONLAKE(dev)) + else if (IS_GEN5(dev)) i915_ironlake_get_mem_freq(dev); /* On the 945G/GM, the chipset reports the MSI capability on the @@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_detect_pch(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_load_modeset_init(dev, prealloc_start, - prealloc_size, agp_size); + ret = i915_load_modeset_init(dev, prealloc_size, agp_size); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); goto out_workqueue_free; @@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } /* Must be done after probing outputs */ - intel_opregion_init(dev, 0); + intel_opregion_init(dev); + acpi_video_register(); setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); @@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->mchdev_lock = &mchdev_lock; spin_unlock(&mchdev_lock); - /* XXX Prevent module unload due to memory corruption bugs. */ - __module_get(THIS_MODULE); - return 0; out_workqueue_free: @@ -2252,15 +2090,20 @@ free_priv: int i915_driver_unload(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - - i915_destroy_error_state(dev); + int ret; spin_lock(&mchdev_lock); i915_mch_dev = NULL; spin_unlock(&mchdev_lock); - destroy_workqueue(dev_priv->wq); - del_timer_sync(&dev_priv->hangcheck_timer); + mutex_lock(&dev->struct_mutex); + ret = i915_gpu_idle(dev); + if (ret) + DRM_ERROR("failed to idle hardware: %d\n", ret); + mutex_unlock(&dev->struct_mutex); + + /* Cancel the retire work handler, which should be idle now. */ + cancel_delayed_work_sync(&dev_priv->mm.retire_work); io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { @@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev) dev_priv->mm.gtt_mtrr = -1; } + acpi_video_unregister(); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + intel_fbdev_fini(dev); intel_modeset_cleanup(dev); /* @@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev) dev_priv->child_dev = NULL; dev_priv->child_dev_num = 0; } - drm_irq_uninstall(dev); + vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, NULL, NULL, NULL); } + /* Free error state after interrupts are fully disabled. 
*/ + del_timer_sync(&dev_priv->hangcheck_timer); + cancel_work_sync(&dev_priv->error_work); + i915_destroy_error_state(dev); + if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); - if (dev_priv->regs != NULL) - iounmap(dev_priv->regs); - - intel_opregion_free(dev, 0); + intel_opregion_fini(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Flush any outstanding unpin_work. */ + flush_workqueue(dev_priv->wq); + i915_gem_free_all_phys_object(dev); mutex_lock(&dev->struct_mutex); @@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev) i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); if (I915_HAS_FBC(dev) && i915_powersave) i915_cleanup_compression(dev); - drm_mm_takedown(&dev_priv->vram); - i915_gem_lastclose(dev); + drm_mm_takedown(&dev_priv->mm.vram); intel_cleanup_overlay(dev); + + if (!I915_NEED_GFX_HWS(dev)) + i915_free_hws(dev); } + if (dev_priv->regs != NULL) + iounmap(dev_priv->regs); + + intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); + destroy_workqueue(dev_priv->wq); + pci_dev_put(dev_priv->bridge_dev); kfree(dev->dev_private); return 0; } -int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) +int i915_driver_open(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_file_private *i915_file_priv; + struct drm_i915_file_private *file_priv; DRM_DEBUG_DRIVER("\n"); - i915_file_priv = (struct drm_i915_file_private *) - kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); - - if (!i915_file_priv) + file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); + if (!file_priv) return -ENOMEM; - file_priv->driver_priv = i915_file_priv; + file->driver_priv = file_priv; - INIT_LIST_HEAD(&i915_file_priv->mm.request_list); + spin_lock_init(&file_priv->mm.lock); + INIT_LIST_HEAD(&file_priv->mm.request_list); return 0; } @@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) i915_mem_release(dev, file_priv, dev_priv->agp_heap); } -void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) +void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; + struct drm_i915_file_private *file_priv = file->driver_priv; - kfree(i915_file_priv); + kfree(file_priv); } struct drm_ioctl_desc i915_ioctls[] = { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 895ab896e336..3467dd420760 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -32,6 +32,7 @@ #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" +#include "intel_drv.h" #include <linux/console.h> #include "drm_crtc_helper.h" @@ -61,86 +62,110 @@ extern int intel_agp_enabled; .driver_data = (unsigned long) info } static const struct intel_device_info intel_i830_info = { - .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_845g_info = { - .gen = 2, .is_i8xx = 1, + .gen = 2, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i85x_info = { - .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, + .gen = 2, .is_i85x = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i865g_info = { - .gen = 2, .is_i8xx = 1, + .gen = 2, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct 
intel_device_info intel_i915g_info = { - .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, + .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { - .gen = 3, .is_i9xx = 1, .is_mobile = 1, + .gen = 3, .is_mobile = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, }; static const struct intel_device_info intel_i945g_info = { - .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { - .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, + .gen = 3, .is_i945gm = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, + .has_overlay = 1, .overlay_needs_physical = 1, + .supports_tv = 1, }; static const struct intel_device_info intel_i965g_info = { - .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, + .gen = 4, .is_broadwater = 1, .has_hotplug = 1, + .has_overlay = 1, }; static const struct intel_device_info intel_i965gm_info = { - .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, + .gen = 4, .is_crestline = 1, .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, + .has_overlay = 1, + .supports_tv = 1, }; static const struct intel_device_info intel_g33_info = { - .gen = 3, .is_g33 = 1, .is_i9xx = 1, + .gen = 3, .is_g33 = 1, .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, }; static const struct intel_device_info intel_g45_info = { - .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_gm45_info = { - .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, + .gen = 4, .is_g4x = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, + .supports_tv = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_pineview_info = { - .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, + .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, + .has_overlay = 1, }; static const struct intel_device_info intel_ironlake_d_info = { - .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, + .gen = 5, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_ironlake_m_info = { - .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, + .gen = 5, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, + .has_bsd_ring = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { - .gen = 6, .is_i965g = 1, .is_i9xx = 1, + .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, + .has_blt_ring = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { - .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, + .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, + .has_bsd_ring = 1, + .has_blt_ring = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ @@ -237,7 +262,7 @@ static int i915_drm_freeze(struct drm_device *dev) i915_save_state(dev); - intel_opregion_free(dev, 1); + intel_opregion_fini(dev); /* Modeset on resume, not lid events */ 
dev_priv->modeset_on_lid = 0; @@ -258,6 +283,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) if (state.event == PM_EVENT_PRETHAW) return 0; + drm_kms_helper_poll_disable(dev); + error = i915_drm_freeze(dev); if (error) return error; @@ -277,8 +304,7 @@ static int i915_drm_thaw(struct drm_device *dev) int error = 0; i915_restore_state(dev); - - intel_opregion_init(dev, 1); + intel_opregion_setup(dev); /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -294,6 +320,8 @@ static int i915_drm_thaw(struct drm_device *dev) drm_helper_resume_force_mode(dev); } + intel_opregion_init(dev); + dev_priv->modeset_on_lid = 0; return error; @@ -301,12 +329,79 @@ static int i915_drm_thaw(struct drm_device *dev) int i915_resume(struct drm_device *dev) { + int ret; + if (pci_enable_device(dev->pdev)) return -EIO; pci_set_master(dev->pdev); - return i915_drm_thaw(dev); + ret = i915_drm_thaw(dev); + if (ret) + return ret; + + drm_kms_helper_poll_enable(dev); + return 0; +} + +static int i8xx_do_reset(struct drm_device *dev, u8 flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_I85X(dev)) + return -ENODEV; + + I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); + POSTING_READ(D_STATE); + + if (IS_I830(dev) || IS_845G(dev)) { + I915_WRITE(DEBUG_RESET_I830, + DEBUG_RESET_DISPLAY | + DEBUG_RESET_RENDER | + DEBUG_RESET_FULL); + POSTING_READ(DEBUG_RESET_I830); + msleep(1); + + I915_WRITE(DEBUG_RESET_I830, 0); + POSTING_READ(DEBUG_RESET_I830); + } + + msleep(1); + + I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); + POSTING_READ(D_STATE); + + return 0; +} + +static int i965_reset_complete(struct drm_device *dev) +{ + u8 gdrst; + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); + return gdrst & 0x1; +} + +static int i965_do_reset(struct drm_device *dev, u8 flags) +{ + u8 gdrst; + + /* + * Set the domains we want to reset (GRDOM/bits 2 and 3) as + * well as the reset bit (GR/bit 0). Setting the GR bit + * triggers the reset; when done, the hardware will clear it. + */ + pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); + pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1); + + return wait_for(i965_reset_complete(dev), 500); +} + +static int ironlake_do_reset(struct drm_device *dev, u8 flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); + I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1); + return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); } /** @@ -325,54 +420,39 @@ int i915_resume(struct drm_device *dev) * - re-init interrupt state * - re-init display */ -int i965_reset(struct drm_device *dev, u8 flags) +int i915_reset(struct drm_device *dev, u8 flags) { drm_i915_private_t *dev_priv = dev->dev_private; - unsigned long timeout; - u8 gdrst; /* * We really should only reset the display subsystem if we actually * need to */ bool need_display = true; + int ret; mutex_lock(&dev->struct_mutex); - /* - * Clear request list - */ - i915_gem_retire_requests(dev); + i915_gem_reset(dev); - if (need_display) - i915_save_display(dev); - - if (IS_I965G(dev) || IS_G4X(dev)) { - /* - * Set the domains we want to reset, then the reset bit (bit 0). - * Clear the reset bit after a while and wait for hardware status - * bit (bit 1) to be set - */ - pci_read_config_byte(dev->pdev, GDRST, &gdrst); - pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 
0x1 : 0x0)); - udelay(50); - pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe); - - /* ...we don't want to loop forever though, 500ms should be plenty */ - timeout = jiffies + msecs_to_jiffies(500); - do { - udelay(100); - pci_read_config_byte(dev->pdev, GDRST, &gdrst); - } while ((gdrst & 0x1) && time_after(timeout, jiffies)); - - if (gdrst & 0x1) { - WARN(true, "i915: Failed to reset chip\n"); - mutex_unlock(&dev->struct_mutex); - return -EIO; - } - } else { - DRM_ERROR("Error occurred. Don't know how to reset this chip.\n"); + ret = -ENODEV; + if (get_seconds() - dev_priv->last_gpu_reset < 5) { + DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); + } else switch (INTEL_INFO(dev)->gen) { + case 5: + ret = ironlake_do_reset(dev, flags); + break; + case 4: + ret = i965_do_reset(dev, flags); + break; + case 2: + ret = i8xx_do_reset(dev, flags); + break; + } + dev_priv->last_gpu_reset = get_seconds(); + if (ret) { + DRM_ERROR("Failed to reset chip.\n"); mutex_unlock(&dev->struct_mutex); - return -ENODEV; + return ret; } /* Ok, now get things going again... */ @@ -400,13 +480,19 @@ int i965_reset(struct drm_device *dev, u8 flags) mutex_lock(&dev->struct_mutex); } - /* - * Display needs restore too... - */ - if (need_display) - i915_restore_display(dev); - mutex_unlock(&dev->struct_mutex); + + /* + * Perform a full modeset as on later generations, e.g. Ironlake, we may + * need to retrain the display link and cannot just restore the register + * values. + */ + if (need_display) { + mutex_lock(&dev->mode_config.mutex); + drm_helper_resume_force_mode(dev); + mutex_unlock(&dev->mode_config.mutex); + } + return 0; } @@ -524,8 +610,6 @@ static struct drm_driver driver = { .irq_uninstall = i915_driver_irq_uninstall, .irq_handler = i915_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .master_create = i915_master_create, .master_destroy = i915_master_destroy, #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index af4a263cf257..2c2c19b6285e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -34,6 +34,8 @@ #include "intel_bios.h" #include "intel_ringbuffer.h" #include <linux/io-mapping.h> +#include <linux/i2c.h> +#include <drm/intel-gtt.h> /* General customization: */ @@ -73,11 +75,9 @@ enum plane { #define DRIVER_PATCHLEVEL 0 #define WATCH_COHERENCY 0 -#define WATCH_BUF 0 #define WATCH_EXEC 0 -#define WATCH_LRU 0 #define WATCH_RELOC 0 -#define WATCH_INACTIVE 0 +#define WATCH_LISTS 0 #define WATCH_PWRITE 0 #define I915_GEM_PHYS_CURSOR_0 1 @@ -110,8 +110,9 @@ struct intel_opregion { struct opregion_acpi *acpi; struct opregion_swsci *swsci; struct opregion_asle *asle; - int enabled; + void *vbt; }; +#define OPREGION_SIZE (8*1024) struct intel_overlay; struct intel_overlay_error_state; @@ -125,13 +126,16 @@ struct drm_i915_master_private { struct drm_i915_fence_reg { struct drm_gem_object *obj; struct list_head lru_list; + bool gpu; }; struct sdvo_device_mapping { + u8 initialized; u8 dvo_port; u8 slave_addr; u8 dvo_wiring; - u8 initialized; + u8 i2c_pin; + u8 i2c_speed; u8 ddc_pin; }; @@ -193,28 +197,29 @@ struct drm_i915_display_funcs { struct intel_device_info { u8 gen; u8 is_mobile : 1; - u8 is_i8xx : 1; u8 is_i85x : 1; u8 is_i915g : 1; - u8 is_i9xx : 1; u8 is_i945gm : 1; - u8 is_i965g : 1; - u8 is_i965gm : 1; u8 is_g33 : 1; u8 need_gfx_hws : 1; u8 is_g4x : 1; u8 is_pineview : 1; u8 is_broadwater : 1; u8 is_crestline : 1; - u8 is_ironlake : 1; u8 has_fbc : 1; 
u8 has_rc6 : 1; u8 has_pipe_cxsr : 1; u8 has_hotplug : 1; u8 cursor_needs_physical : 1; + u8 has_overlay : 1; + u8 overlay_needs_physical : 1; + u8 supports_tv : 1; + u8 has_bsd_ring : 1; + u8 has_blt_ring : 1; }; enum no_fbc_reason { + FBC_NO_OUTPUT, /* no outputs enabled to compress */ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ FBC_MODE_TOO_LARGE, /* mode too large for compression */ @@ -241,9 +246,16 @@ typedef struct drm_i915_private { void __iomem *regs; + struct intel_gmbus { + struct i2c_adapter adapter; + struct i2c_adapter *force_bit; + u32 reg0; + } *gmbus; + struct pci_dev *bridge_dev; struct intel_ring_buffer render_ring; struct intel_ring_buffer bsd_ring; + struct intel_ring_buffer blt_ring; uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; @@ -263,6 +275,9 @@ typedef struct drm_i915_private { int front_offset; int current_page; int page_flipping; +#define I915_DEBUG_READ (1<<0) +#define I915_DEBUG_WRITE (1<<1) + unsigned long debug_flags; wait_queue_head_t irq_queue; atomic_t irq_received; @@ -289,24 +304,21 @@ typedef struct drm_i915_private { unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; int num_pipe; - u32 flush_rings; -#define FLUSH_RENDER_RING 0x1 -#define FLUSH_BSD_RING 0x2 /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */ +#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */ struct timer_list hangcheck_timer; int hangcheck_count; uint32_t last_acthd; uint32_t last_instdone; uint32_t last_instdone1; - struct drm_mm vram; - unsigned long cfb_size; unsigned long cfb_pitch; + unsigned long cfb_offset; int cfb_fence; int cfb_plane; + int cfb_y; int irq_enabled; @@ -316,8 +328,7 @@ typedef struct drm_i915_private { struct intel_overlay *overlay; /* LVDS info */ - int backlight_duty_cycle; /* restore backlight to this value */ - bool panel_wants_dither; + int backlight_level; /* restore backlight to this value */ struct drm_display_mode *panel_fixed_mode; struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ @@ -328,13 +339,23 @@ typedef struct drm_i915_private { unsigned int lvds_vbt:1; unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; - unsigned int edp_support:1; int lvds_ssc_freq; - int edp_bpp; + struct { + int rate; + int lanes; + int preemphasis; + int vswing; + + bool initialized; + bool support; + int bpp; + struct edp_power_seq pps; + } edp; + bool no_aux_handshake; struct notifier_block lid_notifier; - int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */ + int crt_ddc_pin; struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ @@ -344,6 +365,7 @@ typedef struct drm_i915_private { spinlock_t error_lock; struct drm_i915_error_state *first_error; struct work_struct error_work; + struct completion error_completion; struct workqueue_struct *wq; /* Display functions */ @@ -507,6 +529,11 @@ typedef struct drm_i915_private { u32 saveMCHBAR_RENDER_STANDBY; struct { + /** Bridge to intel-gtt-ko */ + struct intel_gtt *gtt; + /** Memory allocator for GTT stolen memory */ + struct drm_mm vram; + /** Memory allocator for GTT */ struct drm_mm gtt_space; struct io_mapping *gtt_mapping; @@ -521,7 +548,16 @@ typedef struct drm_i915_private { */ struct list_head shrink_list; - spinlock_t active_list_lock; + /** + * List of objects currently 
involved in rendering. + * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. + */ + struct list_head active_list; /** * List of objects which are not in the ringbuffer but which @@ -534,15 +570,6 @@ typedef struct drm_i915_private { */ struct list_head flushing_list; - /** - * List of objects currently pending a GPU write flush. - * - * All elements on this list will belong to either the - * active_list or flushing_list, last_rendering_seqno can - * be used to differentiate between the two elements. - */ - struct list_head gpu_write_list; - /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. @@ -555,6 +582,12 @@ typedef struct drm_i915_private { */ struct list_head inactive_list; + /** + * LRU list of objects which are not in the ringbuffer but + * are still pinned in the GTT. + */ + struct list_head pinned_list; + /** LRU list of objects with fence regs on them. */ struct list_head fence_list; @@ -611,6 +644,17 @@ typedef struct drm_i915_private { /* storage for physical objects */ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; + + uint32_t flush_rings; + + /* accounting, useful for userland debugging */ + size_t object_memory; + size_t pin_memory; + size_t gtt_memory; + size_t gtt_total; + u32 object_count; + u32 pin_count; + u32 gtt_count; } mm; struct sdvo_device_mapping sdvo_mappings[2]; /* indicate whether the LVDS_BORDER should be enabled or not */ @@ -626,8 +670,6 @@ typedef struct drm_i915_private { /* Reclocking support */ bool render_reclock_avail; bool lvds_downclock_avail; - /* indicate whether the LVDS EDID is OK */ - bool lvds_edid_good; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; struct work_struct idle_work; @@ -661,6 +703,8 @@ typedef struct drm_i915_private { struct drm_mm_node *compressed_fb; struct drm_mm_node *compressed_llb; + unsigned long last_gpu_reset; + /* list of fbdev register on this device */ struct intel_fbdev *fbdev; } drm_i915_private_t; @@ -673,7 +717,8 @@ struct drm_i915_gem_object { struct drm_mm_node *gtt_space; /** This object's place on the active/flushing/inactive lists */ - struct list_head list; + struct list_head ring_list; + struct list_head mm_list; /** This object's place on GPU write list */ struct list_head gpu_write_list; /** This object's place on eviction list */ @@ -816,12 +861,14 @@ struct drm_i915_gem_request { /** global list entry for this request */ struct list_head list; + struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_list; }; struct drm_i915_file_private { struct { + struct spinlock lock; struct list_head request_list; } mm; }; @@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, extern int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4); -extern int i965_reset(struct drm_device *dev, u8 flags); +extern int i915_reset(struct drm_device *dev, u8 flags); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); @@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); /* i915_irq.c */ void 
i915_hangcheck_elapsed(unsigned long data); -void i915_destroy_error_state(struct drm_device *dev); extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, @@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void intel_enable_asle (struct drm_device *dev); +#ifdef CONFIG_DEBUG_FS +extern void i915_destroy_error_state(struct drm_device *dev); +#else +#define i915_destroy_error_state(x) +#endif + /* i915_mem.c */ extern int i915_mem_alloc(struct drm_device *dev, void *data, @@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap); extern void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, struct mem_block *heap); /* i915_gem.c */ +int i915_gem_check_is_wedged(struct drm_device *dev); int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_create_ioctl(struct drm_device *dev, void *data, @@ -972,13 +1025,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); void i915_gem_release_mmap(struct drm_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); -uint32_t i915_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring); -bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); -int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); -int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); + +/** + * Returns true if seq1 is later than seq2. + */ +static inline bool +i915_seqno_passed(uint32_t seq1, uint32_t seq2) +{ + return (int32_t)(seq1 - seq2) >= 0; +} + +int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, + bool interruptible); +int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, + bool interruptible); void i915_gem_retire_requests(struct drm_device *dev); -void i915_gem_retire_work_handler(struct work_struct *work); +void i915_gem_reset(struct drm_device *dev); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, @@ -990,16 +1052,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start, int i915_gpu_idle(struct drm_device *dev); int i915_gem_idle(struct drm_device *dev); uint32_t i915_add_request(struct drm_device *dev, - struct drm_file *file_priv, - uint32_t flush_domains, - struct intel_ring_buffer *ring); + struct drm_file *file_priv, + struct drm_i915_gem_request *request, + struct intel_ring_buffer *ring); int i915_do_wait_request(struct drm_device *dev, - uint32_t seqno, int interruptible, - struct intel_ring_buffer *ring); + uint32_t seqno, + bool interruptible, + struct intel_ring_buffer *ring); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write); -int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); +int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, + bool pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_gem_object *obj, int id, @@ -1007,10 +1071,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev, void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); -int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); -void i915_gem_object_put_pages(struct drm_gem_object 
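The newly inlined i915_seqno_passed() above relies on signed modular subtraction, which is what keeps it correct across 32-bit seqno wraparound. A self-contained check of that property:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* same comparison as the new header inline */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(2, 1));
	assert(!seqno_passed(1, 2));
	/* wraparound: seqno 1 is "later than" 0xfffffffe */
	assert(seqno_passed(1, 0xfffffffeu));
	return 0;
}
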
*obj); void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); -int i915_gem_object_flush_write_domain(struct drm_gem_object *obj); void i915_gem_shrinker_init(void); void i915_gem_shrinker_exit(void); @@ -1032,15 +1093,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, /* i915_gem_debug.c */ void i915_gem_dump_object(struct drm_gem_object *obj, int len, const char *where, uint32_t mark); -#if WATCH_INACTIVE -void i915_verify_inactive(struct drm_device *dev, char *file, int line); +#if WATCH_LISTS +int i915_verify_lists(struct drm_device *dev); #else -#define i915_verify_inactive(dev, file, line) +#define i915_verify_lists(dev) 0 #endif void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); void i915_gem_dump_object(struct drm_gem_object *obj, int len, const char *where, uint32_t mark); -void i915_dump_lru(struct drm_device *dev, const char *where); /* i915_debugfs.c */ int i915_debugfs_init(struct drm_minor *minor); @@ -1054,21 +1114,42 @@ extern int i915_restore_state(struct drm_device *dev); extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); +/* intel_i2c.c */ +extern int intel_setup_gmbus(struct drm_device *dev); +extern void intel_teardown_gmbus(struct drm_device *dev); +extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); +extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); +extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) +{ + return container_of(adapter, struct intel_gmbus, adapter)->force_bit; +} +extern void intel_i2c_reset(struct drm_device *dev); + +/* intel_opregion.c */ +extern int intel_opregion_setup(struct drm_device *dev); #ifdef CONFIG_ACPI -/* i915_opregion.c */ -extern int intel_opregion_init(struct drm_device *dev, int resume); -extern void intel_opregion_free(struct drm_device *dev, int suspend); -extern void opregion_asle_intr(struct drm_device *dev); -extern void ironlake_opregion_gse_intr(struct drm_device *dev); -extern void opregion_enable_asle(struct drm_device *dev); +extern void intel_opregion_init(struct drm_device *dev); +extern void intel_opregion_fini(struct drm_device *dev); +extern void intel_opregion_asle_intr(struct drm_device *dev); +extern void intel_opregion_gse_intr(struct drm_device *dev); +extern void intel_opregion_enable_asle(struct drm_device *dev); #else -static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; } -static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; } -static inline void opregion_asle_intr(struct drm_device *dev) { return; } -static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; } -static inline void opregion_enable_asle(struct drm_device *dev) { return; } +static inline void intel_opregion_init(struct drm_device *dev) { return; } +static inline void intel_opregion_fini(struct drm_device *dev) { return; } +static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } +static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } +static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } #endif +/* intel_acpi.c */ +#ifdef CONFIG_ACPI +extern void intel_register_dsm_handler(void); +extern void intel_unregister_dsm_handler(void); +#else +static inline void intel_register_dsm_handler(void) { return; } +static inline void intel_unregister_dsm_handler(void) { return; } +#endif /* CONFIG_ACPI */ + 
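intel_gmbus_is_forced_bit() above recovers the wrapping intel_gmbus from the embedded i2c_adapter handed around by the i2c core via container_of(); note that `extern inline` in a header depends on GNU89 inline semantics, and `static inline` is the more usual kernel idiom. A userspace model of the recovery, with a simplified container_of that omits the kernel's type checking:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct i2c_adapter { int nr; };

struct intel_gmbus {
	struct i2c_adapter adapter;	/* embedded, not a pointer */
	struct i2c_adapter *force_bit;
};

int main(void)
{
	struct intel_gmbus bus = { .adapter = { .nr = 0 }, .force_bit = NULL };
	struct i2c_adapter *adap = &bus.adapter;
	/* walk back from the member to the containing struct */
	struct intel_gmbus *owner =
		container_of(adap, struct intel_gmbus, adapter);

	printf("forced to bit-banging: %s\n",
	       owner->force_bit ? "yes" : "no");
	return 0;
}
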
/* modesetting */ extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); @@ -1084,8 +1165,10 @@ extern void intel_detect_pch (struct drm_device *dev); extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); /* overlay */ +#ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); +#endif /** * Lock test for when it's just for synchronization of ring access. @@ -1099,8 +1182,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove LOCK_TEST_WITH_RETURN(dev, file_priv); \ } while (0) -#define I915_READ(reg) readl(dev_priv->regs + (reg)) -#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg)) +static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg) +{ + u32 val; + + val = readl(dev_priv->regs + reg); + if (dev_priv->debug_flags & I915_DEBUG_READ) + printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg); + return val; +} + +static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, + u32 val) +{ + writel(val, dev_priv->regs + reg); + if (dev_priv->debug_flags & I915_DEBUG_WRITE) + printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg); +} + +#define I915_READ(reg) i915_read(dev_priv, (reg)) +#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val)) #define I915_READ16(reg) readw(dev_priv->regs + (reg)) #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) #define I915_READ8(reg) readb(dev_priv->regs + (reg)) @@ -1110,6 +1211,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove #define POSTING_READ(reg) (void)I915_READ(reg) #define POSTING_READ16(reg) (void)I915_READ16(reg) +#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \ + I915_DEBUG_WRITE) +#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \ + I915_DEBUG_WRITE)) + #define I915_VERBOSE 0 #define BEGIN_LP_RING(n) do { \ @@ -1166,8 +1272,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) #define IS_I945G(dev) ((dev)->pci_device == 0x2772) #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) -#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) -#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) @@ -1178,8 +1282,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) -#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) -#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) @@ -1188,33 +1290,34 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) -#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) +#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) +#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) +#define 
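The I915_READ/I915_WRITE conversion above routes every MMIO access through one helper pair so the new debug_flags bits can trace all register traffic on demand. A standalone model of the scheme, with readl()/writel() replaced by an array and the flag names copied from the hunk:

#include <stdio.h>

#define I915_DEBUG_READ  (1 << 0)
#define I915_DEBUG_WRITE (1 << 1)

static unsigned int fake_regs[16];	/* stands in for the MMIO BAR */
static unsigned long debug_flags;

static unsigned int i915_read_model(unsigned int reg)
{
	unsigned int val = fake_regs[reg / 4];
	if (debug_flags & I915_DEBUG_READ)
		fprintf(stderr, "read 0x%08x from 0x%08x\n", val, reg);
	return val;
}

static void i915_write_model(unsigned int reg, unsigned int val)
{
	fake_regs[reg / 4] = val;
	if (debug_flags & I915_DEBUG_WRITE)
		fprintf(stderr, "wrote 0x%08x to 0x%08x\n", val, reg);
}

int main(void)
{
	debug_flags |= I915_DEBUG_READ | I915_DEBUG_WRITE;
	i915_write_model(0x8, 0xdeadbeef);
	(void)i915_read_model(0x8);
	return 0;
}
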
HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) +#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) + /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. */ -#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ +#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) -#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev)) -#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) -#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) +#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) +#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) -#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ - !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \ - !IS_GEN6(dev)) +#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) /* dsparb controlled by hw only */ #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) -#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev)) +#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) -#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ - IS_GEN6(dev)) -#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) +#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) +#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index eb6c473c6d1b..8eb8453208b5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -37,7 +37,9 @@ #include static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); -static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); + +static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, + bool pipelined); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, @@ -46,7 +48,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); +static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, + bool interruptible); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); @@ -55,9 +58,111 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o struct drm_file *file_priv); static void i915_gem_free_object_tail(struct drm_gem_object *obj); +static int +i915_gem_object_get_pages(struct drm_gem_object *obj, + gfp_t gfpmask); + +static void +i915_gem_object_put_pages(struct drm_gem_object 
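The deleted IS_I965G/IS_IRONLAKE/IS_I9XX macros complete the move to capability bits in intel_device_info: tests such as HAS_BSD() and SUPPORTS_TV() now read a per-chipset flag instead of matching PCI-ID lists, so new chipsets only need a new info table entry. A standalone sketch of the pattern — struct and values are illustrative, not the driver's tables:

#include <stdio.h>

struct device_info {
	unsigned gen;
	unsigned has_bsd_ring : 1;
	unsigned has_blt_ring : 1;
	unsigned supports_tv : 1;
};

/* capability macros become plain data lookups */
#define HAS_BSD(info)	((info)->has_bsd_ring)
#define HAS_BLT(info)	((info)->has_blt_ring)

int main(void)
{
	struct device_info gen6 = {
		.gen = 6, .has_bsd_ring = 1, .has_blt_ring = 1,
	};

	printf("BSD: %d, BLT: %d\n", HAS_BSD(&gen6), HAS_BLT(&gen6));
	return 0;
}
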
*obj); + static LIST_HEAD(shrink_list); static DEFINE_SPINLOCK(shrink_list_lock); +/* some bookkeeping */ +static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.object_count++; + dev_priv->mm.object_memory += size; +} + +static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.object_count--; + dev_priv->mm.object_memory -= size; +} + +static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.gtt_count++; + dev_priv->mm.gtt_memory += size; +} + +static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.gtt_count--; + dev_priv->mm.gtt_memory -= size; +} + +static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.pin_count++; + dev_priv->mm.pin_memory += size; +} + +static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, + size_t size) +{ + dev_priv->mm.pin_count--; + dev_priv->mm.pin_memory -= size; +} + +int +i915_gem_check_is_wedged(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct completion *x = &dev_priv->error_completion; + unsigned long flags; + int ret; + + if (!atomic_read(&dev_priv->mm.wedged)) + return 0; + + ret = wait_for_completion_interruptible(x); + if (ret) + return ret; + + /* Success, we reset the GPU! */ + if (!atomic_read(&dev_priv->mm.wedged)) + return 0; + + /* GPU is hung, bump the completion count to account for + * the token we just consumed so that we never hit zero and + * end up waiting upon a subsequent completion event that + * will never happen. + */ + spin_lock_irqsave(&x->wait.lock, flags); + x->done++; + spin_unlock_irqrestore(&x->wait.lock, flags); + return -EIO; +} + +static int i915_mutex_lock_interruptible(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = i915_gem_check_is_wedged(dev); + if (ret) + return ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + if (atomic_read(&dev_priv->mm.wedged)) { + mutex_unlock(&dev->struct_mutex); + return -EAGAIN; + } + + WARN_ON(i915_verify_lists(dev)); + return 0; +} + static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) { @@ -66,7 +171,8 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) obj_priv->pin_count == 0; } -int i915_gem_do_init(struct drm_device *dev, unsigned long start, +int i915_gem_do_init(struct drm_device *dev, + unsigned long start, unsigned long end) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -80,7 +186,7 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start, drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); - dev->gtt_total = (uint32_t) (end - start); + dev_priv->mm.gtt_total = end - start; return 0; } @@ -103,14 +209,16 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_get_aperture *args = data; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - args->aper_size = dev->gtt_total; - args->aper_available_size = (args->aper_size - - atomic_read(&dev->pin_memory)); + mutex_lock(&dev->struct_mutex); + args->aper_size = dev_priv->mm.gtt_total; + args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; + mutex_unlock(&dev->struct_mutex); return 0; } @@ -136,12 +244,17 @@ 
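i915_gem_check_is_wedged() above uses error_completion as a reset gate: wait_for_completion_interruptible() consumes one "done" token, so if the GPU is still wedged afterwards the count is bumped back up to keep later waiters from blocking on a completion that will never fire. i915_mutex_lock_interruptible() then becomes the standard ioctl prologue; a hedged sketch of a caller (demo_ioctl is hypothetical):

static int demo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	int ret;

	/* fails with -EIO if wedged, -EAGAIN if a reset raced in,
	 * -EINTR/-ERESTARTSYS if interrupted while waiting */
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	/* ... look up the object and do the work ... */

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
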
i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); - /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(obj); if (ret) { + drm_gem_object_release(obj); + i915_gem_info_remove_obj(dev->dev_private, obj->size); + kfree(obj); return ret; } + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference(obj); + trace_i915_gem_object_create(obj); + args->handle = handle; return 0; } @@ -152,19 +265,14 @@ fast_shmem_read(struct page **pages, char __user *data, int length) { - char __iomem *vaddr; - int unwritten; + char *vaddr; + int ret; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); - if (vaddr == NULL) - return -ENOMEM; - unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); + ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); kunmap_atomic(vaddr); - if (unwritten) - return -EFAULT; - - return 0; + return ret; } static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) @@ -258,22 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - mutex_lock(&dev->struct_mutex); - - ret = i915_gem_object_get_pages(obj, 0); - if (ret != 0) - goto fail_unlock; - - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, - args->size); - if (ret != 0) - goto fail_put_pages; - obj_priv = to_intel_bo(obj); offset = args->offset; @@ -290,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = fast_shmem_read(obj_priv->pages, - page_base, page_offset, - user_data, page_length); - if (ret) - goto fail_put_pages; + if (fast_shmem_read(obj_priv->pages, + page_base, page_offset, + user_data, page_length)) + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } static int @@ -367,31 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 1, 0, user_pages, NULL); up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; - goto fail_put_user_pages; + goto out; } - do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - - mutex_lock(&dev->struct_mutex); - - ret = i915_gem_object_get_pages_or_evict(obj); - if (ret) - goto fail_unlock; - - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, + ret = i915_gem_object_set_cpu_read_domain_range(obj, + args->offset, args->size); - if (ret != 0) - goto fail_put_pages; + if (ret) + goto out; + + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); obj_priv = to_intel_bo(obj); offset = args->offset; @@ -436,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device 
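fast_shmem_read() now returns the copy primitive's result directly: __copy_to_user_inatomic() reports the number of bytes it could not copy, zero meaning full success, and the caller treats any shortfall as the cue to fall back to the slow path. A standalone model of that contract (copy_to_user_model is hypothetical):

#include <stdio.h>
#include <string.h>

/* returns bytes NOT copied, like __copy_to_user_inatomic();
 * faulting_from simulates an unmapped user page mid-buffer */
static size_t copy_to_user_model(char *dst, const char *src,
				 size_t len, size_t faulting_from)
{
	size_t ok = len < faulting_from ? len : faulting_from;

	memcpy(dst, src, ok);
	return len - ok;
}

int main(void)
{
	char dst[8];
	size_t uncopied = copy_to_user_model(dst, "abcdef", 6, 4);

	printf("uncopied=%zu -> %s\n", uncopied,
	       uncopied ? "-EFAULT, take the slow path" : "done");
	return 0;
}
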
*dev, struct drm_gem_object *obj, offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); -fail_put_user_pages: +out: for (i = 0; i < pinned_pages; i++) { SetPageDirty(user_pages[i]); page_cache_release(user_pages[i]); @@ -462,37 +545,64 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_pread *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - int ret; + int ret = 0; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } obj_priv = to_intel_bo(obj); /* Bounds check source. */ if (args->offset > obj->size || args->size > obj->size - args->offset) { ret = -EINVAL; - goto err; + goto out; } + if (args->size == 0) + goto out; + if (!access_ok(VERIFY_WRITE, (char __user *)(uintptr_t)args->data_ptr, args->size)) { ret = -EFAULT; - goto err; + goto out; } - if (i915_gem_object_needs_bit17_swizzle(obj)) { - ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); - } else { + ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, + args->size); + if (ret) { + ret = -EFAULT; + goto out; + } + + ret = i915_gem_object_get_pages_or_evict(obj); + if (ret) + goto out; + + ret = i915_gem_object_set_cpu_read_domain_range(obj, + args->offset, + args->size); + if (ret) + goto out_put; + + ret = -EFAULT; + if (!i915_gem_object_needs_bit17_swizzle(obj)) ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); - if (ret != 0) - ret = i915_gem_shmem_pread_slow(dev, obj, args, - file_priv); - } + if (ret == -EFAULT) + ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); -err: - drm_gem_object_unreference_unlocked(obj); +out_put: + i915_gem_object_put_pages(obj); +out: + drm_gem_object_unreference(obj); +unlock: + mutex_unlock(&dev->struct_mutex); return ret; } @@ -513,9 +623,7 @@ fast_user_write(struct io_mapping *mapping, unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, user_data, length); io_mapping_unmap_atomic(vaddr_atomic); - if (unwritten) - return -EFAULT; - return 0; + return unwritten; } /* Here's the write path which can sleep for @@ -548,18 +656,14 @@ fast_shmem_write(struct page **pages, char __user *data, int length) { - char __iomem *vaddr; - unsigned long unwritten; + char *vaddr; + int ret; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); - if (vaddr == NULL) - return -ENOMEM; - unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); + ret = __copy_from_user_inatomic(vaddr + page_offset, data, length); kunmap_atomic(vaddr); - if (unwritten) - return -EFAULT; - return 0; + return ret; } /** @@ -577,22 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - - mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(obj, 0); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ret; - } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret) - goto fail; - obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; @@ -609,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = 
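The pread ioctl now owns locking, prefaulting, and page acquisition, leaving the fast/slow helpers as pure copy loops. The fallback idiom is worth spelling out; restated from the hunk above with annotations:

ret = -EFAULT;	/* pre-load so a skipped fast path still falls through */
if (!i915_gem_object_needs_bit17_swizzle(obj))
	ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
if (ret == -EFAULT)	/* fast path faulted, or was skipped entirely */
	ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
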
fast_user_write (dev_priv->mm.gtt_mapping, page_base, - page_offset, user_data, page_length); - /* If we get a fault while copying data, then (presumably) our * source page isn't available. Return the error and we'll * retry in the slow path. */ - if (ret) - goto fail; + if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, + page_offset, user_data, page_length)) + + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail: - i915_gem_object_unpin(obj); - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } /** @@ -665,27 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 0, 0, user_pages, NULL); up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; goto out_unpin_pages; } - mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(obj, 0); - if (ret) - goto out_unlock; - ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) - goto out_unpin_object; + goto out_unpin_pages; obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; @@ -721,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, data_ptr += page_length; } -out_unpin_object: - i915_gem_object_unpin(obj); -out_unlock: - mutex_unlock(&dev->struct_mutex); out_unpin_pages: for (i = 0; i < pinned_pages; i++) page_cache_release(user_pages[i]); @@ -747,21 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - mutex_lock(&dev->struct_mutex); - - ret = i915_gem_object_get_pages(obj, 0); - if (ret != 0) - goto fail_unlock; - - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret != 0) - goto fail_put_pages; - obj_priv = to_intel_bo(obj); offset = args->offset; obj_priv->dirty = 1; @@ -779,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = fast_shmem_write(obj_priv->pages, + if (fast_shmem_write(obj_priv->pages, page_base, page_offset, - user_data, page_length); - if (ret) - goto fail_put_pages; + user_data, page_length)) + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } /** @@ -833,30 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 0, 0, user_pages, NULL); 
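Note the lock dance around get_user_pages() in the slow paths: struct_mutex is dropped before taking mmap_sem, since the GTT fault handler already nests struct_mutex inside mmap_sem and taking the two in the opposite order here would be a lock inversion. Restated from the hunk above:

mutex_unlock(&dev->struct_mutex);	/* never hold it across mmap_sem */
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
			      num_pages, 0, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
mutex_lock(&dev->struct_mutex);		/* reacquire before touching lists */
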
up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; - goto fail_put_user_pages; + goto out; } - do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - - mutex_lock(&dev->struct_mutex); - - ret = i915_gem_object_get_pages_or_evict(obj); - if (ret) - goto fail_unlock; - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret != 0) - goto fail_put_pages; + if (ret) + goto out; + + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); obj_priv = to_intel_bo(obj); offset = args->offset; @@ -902,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); -fail_put_user_pages: +out: for (i = 0; i < pinned_pages; i++) page_cache_release(user_pages[i]); drm_free_large(user_pages); @@ -921,29 +976,46 @@ fail_put_user_pages: */ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pwrite *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; int ret = 0; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } obj_priv = to_intel_bo(obj); + /* Bounds check destination. */ if (args->offset > obj->size || args->size > obj->size - args->offset) { ret = -EINVAL; - goto err; + goto out; } + if (args->size == 0) + goto out; + if (!access_ok(VERIFY_READ, (char __user *)(uintptr_t)args->data_ptr, args->size)) { ret = -EFAULT; - goto err; + goto out; + } + + ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, + args->size); + if (ret) { + ret = -EFAULT; + goto out; } /* We can only do the GTT pwrite on untiled buffers, as otherwise @@ -953,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * perspective, requiring manual detiling by the client. 
*/ if (obj_priv->phys_obj) - ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); + ret = i915_gem_phys_pwrite(dev, obj, args, file); else if (obj_priv->tiling_mode == I915_TILING_NONE && - dev->gtt_total != 0 && + obj_priv->gtt_space && obj->write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); - if (ret == -EFAULT) { - ret = i915_gem_gtt_pwrite_slow(dev, obj, args, - file_priv); - } - } else if (i915_gem_object_needs_bit17_swizzle(obj)) { - ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); + ret = i915_gem_object_pin(obj, 0); + if (ret) + goto out; + + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + goto out_unpin; + + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); + if (ret == -EFAULT) + ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); + +out_unpin: + i915_gem_object_unpin(obj); } else { - ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); - if (ret == -EFAULT) { - ret = i915_gem_shmem_pwrite_slow(dev, obj, args, - file_priv); - } + ret = i915_gem_object_get_pages_or_evict(obj); + if (ret) + goto out; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + goto out_put; + + ret = -EFAULT; + if (!i915_gem_object_needs_bit17_swizzle(obj)) + ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); + if (ret == -EFAULT) + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); + +out_put: + i915_gem_object_put_pages(obj); } -#if WATCH_PWRITE - if (ret) - DRM_INFO("pwrite failed %d\n", ret); -#endif - -err: - drm_gem_object_unreference_unlocked(obj); +out: + drm_gem_object_unreference(obj); +unlock: + mutex_unlock(&dev->struct_mutex); return ret; } @@ -1014,19 +1101,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (write_domain != 0 && read_domains != write_domain) return -EINVAL; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - obj_priv = to_intel_bo(obj); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; - mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } + obj_priv = to_intel_bo(obj); intel_mark_busy(dev, obj); -#if WATCH_BUF - DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n", - obj, obj->size, read_domains, write_domain); -#endif if (read_domains & I915_GEM_DOMAIN_GTT) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); @@ -1050,12 +1137,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); } - /* Maintain LRU order of "inactive" objects */ if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1069,30 +1156,27 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_sw_finish *args = data; struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; int ret = 0; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - mutex_unlock(&dev->struct_mutex); - return -ENOENT; + ret = -ENOENT; + goto unlock; } -#if WATCH_BUF - DRM_INFO("%s: sw_finish %d (%p %zd)\n", 
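The rewritten pwrite ioctl chooses one of three backends inline, with the pin and domain setup hoisted out of the helpers. A hedged summary of the selection order in the hunk above:

/*
 * Backend selection in i915_gem_pwrite_ioctl(), in order:
 *
 *   1. obj_priv->phys_obj            -> i915_gem_phys_pwrite()
 *   2. untiled && bound in the GTT
 *      && write_domain != CPU        -> pin, set_to_gtt_domain(),
 *                                       gtt_pwrite fast/slow
 *   3. otherwise                     -> get pages, set_to_cpu_domain(),
 *                                       shmem_pwrite fast/slow
 */
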
- __func__, args->handle, obj, obj->size); -#endif - obj_priv = to_intel_bo(obj); - /* Pinned buffers may be scanout, so flush the cache */ - if (obj_priv->pin_count) + if (to_intel_bo(obj)->pin_count) i915_gem_object_flush_cpu_write_domain(obj); drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1181,13 +1265,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Need a new fence register? */ if (obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj); + ret = i915_gem_object_get_fence_reg(obj, true); if (ret) goto unlock; } if (i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + page_offset; @@ -1246,7 +1330,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) obj->size / PAGE_SIZE, 0, 0); if (!list->file_offset_node) { DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); - ret = -ENOMEM; + ret = -ENOSPC; goto out_free_list; } @@ -1258,9 +1342,9 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) } list->hash.key = list->file_offset_node->start; - if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { + ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); + if (ret) { DRM_ERROR("failed to add to map hash\n"); - ret = -ENOMEM; goto out_free_mm; } @@ -1345,14 +1429,14 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj) * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. */ - if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE) + if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE) return 4096; /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. 
*/ - if (IS_I9XX(dev)) + if (INTEL_INFO(dev)->gen == 3) start = 1024*1024; else start = 512*1024; @@ -1390,29 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - - mutex_lock(&dev->struct_mutex); - + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } obj_priv = to_intel_bo(obj); if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } - if (!obj_priv->mmap_offset) { ret = i915_gem_create_mmap_offset(obj); - if (ret) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } args->offset = obj_priv->mmap_offset; @@ -1423,20 +1505,18 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, */ if (!obj_priv->agp_mem) { ret = i915_gem_object_bind_to_gtt(obj, 0); - if (ret) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } -void +static void i915_gem_object_put_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); @@ -1470,13 +1550,25 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) obj_priv->pages = NULL; } +static uint32_t +i915_gem_next_request_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + ring->outstanding_lazy_request = true; + return dev_priv->next_seqno; +} + static void -i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, +i915_gem_object_move_to_active(struct drm_gem_object *obj, struct intel_ring_buffer *ring) { struct drm_device *dev = obj->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + uint32_t seqno = i915_gem_next_request_seqno(dev, ring); + BUG_ON(ring == NULL); obj_priv->ring = ring; @@ -1485,10 +1577,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno, drm_gem_object_reference(obj); obj_priv->active = 1; } + /* Move from whatever list we were on to the tail of execution. 
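i915_gem_next_request_seqno() above introduces the lazy-request contract: activating an object only peeks at dev_priv->next_seqno and flags the ring, deferring the actual request emission. Whoever waits first must materialise it, which is why i915_do_wait_request() below gains:

if (ring->outstanding_lazy_request) {
	/* emit the request that activation only promised */
	seqno = i915_add_request(dev, NULL, NULL, ring);
	if (seqno == 0)
		return -ENOMEM;
}
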
*/ - spin_lock(&dev_priv->mm.active_list_lock); - list_move_tail(&obj_priv->list, &ring->active_list); - spin_unlock(&dev_priv->mm.active_list_lock); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); + list_move_tail(&obj_priv->ring_list, &ring->active_list); obj_priv->last_rendering_seqno = seqno; } @@ -1500,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); BUG_ON(!obj_priv->active); - list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); + list_del_init(&obj_priv->ring_list); obj_priv->last_rendering_seqno = 0; } @@ -1538,11 +1631,11 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - i915_verify_inactive(dev, __FILE__, __LINE__); if (obj_priv->pin_count != 0) - list_del_init(&obj_priv->list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); else - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + list_del_init(&obj_priv->ring_list); BUG_ON(!list_empty(&obj_priv->gpu_write_list)); @@ -1552,30 +1645,28 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) obj_priv->active = 0; drm_gem_object_unreference(obj); } - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); } static void i915_gem_process_flushing_list(struct drm_device *dev, - uint32_t flush_domains, uint32_t seqno, + uint32_t flush_domains, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv, *next; list_for_each_entry_safe(obj_priv, next, - &dev_priv->mm.gpu_write_list, + &ring->gpu_write_list, gpu_write_list) { struct drm_gem_object *obj = &obj_priv->base; - if ((obj->write_domain & flush_domains) == - obj->write_domain && - obj_priv->ring->ring_flag == ring->ring_flag) { + if (obj->write_domain & flush_domains) { uint32_t old_write_domain = obj->write_domain; obj->write_domain = 0; list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_active(obj, seqno, ring); + i915_gem_object_move_to_active(obj, ring); /* update the fence lru list */ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { @@ -1593,23 +1684,27 @@ i915_gem_process_flushing_list(struct drm_device *dev, } uint32_t -i915_add_request(struct drm_device *dev, struct drm_file *file_priv, - uint32_t flush_domains, struct intel_ring_buffer *ring) +i915_add_request(struct drm_device *dev, + struct drm_file *file, + struct drm_i915_gem_request *request, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_file_private *i915_file_priv = NULL; - struct drm_i915_gem_request *request; + struct drm_i915_file_private *file_priv = NULL; uint32_t seqno; int was_empty; - if (file_priv != NULL) - i915_file_priv = file_priv->driver_priv; + if (file != NULL) + file_priv = file->driver_priv; - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return 0; + if (request == NULL) { + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return 0; + } - seqno = ring->add_request(dev, ring, file_priv, flush_domains); + seqno = ring->add_request(dev, ring, 0); + ring->outstanding_lazy_request = false; request->seqno = seqno; request->ring = ring; @@ -1617,23 +1712,20 @@ i915_add_request(struct drm_device 
*dev, struct drm_file *file_priv, was_empty = list_empty(&ring->request_list); list_add_tail(&request->list, &ring->request_list); - if (i915_file_priv) { + if (file_priv) { + spin_lock(&file_priv->mm.lock); + request->file_priv = file_priv; list_add_tail(&request->client_list, - &i915_file_priv->mm.request_list); - } else { - INIT_LIST_HEAD(&request->client_list); + &file_priv->mm.request_list); + spin_unlock(&file_priv->mm.lock); } - /* Associate any objects on the flushing list matching the write - * domain we're flushing with our flush. - */ - if (flush_domains != 0) - i915_gem_process_flushing_list(dev, flush_domains, seqno, ring); - if (!dev_priv->mm.suspended) { - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); if (was_empty) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + queue_delayed_work(dev_priv->wq, + &dev_priv->mm.retire_work, HZ); } return seqno; } @@ -1644,91 +1736,105 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, * Ensures that all commands in the ring are finished * before signalling the CPU */ -static uint32_t +static void i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) { uint32_t flush_domains = 0; /* The sampler always gets flushed on i965 (sigh) */ - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) flush_domains |= I915_GEM_DOMAIN_SAMPLER; ring->flush(dev, ring, I915_GEM_DOMAIN_COMMAND, flush_domains); - return flush_domains; } -/** - * Moves buffers associated only with the given active seqno from the active - * to inactive list, potentially freeing them. - */ -static void -i915_gem_retire_request(struct drm_device *dev, - struct drm_i915_gem_request *request) +static inline void +i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = request->file_priv; - trace_i915_gem_request_retire(dev, request->seqno); + if (!file_priv) + return; - /* Move any buffers on the active list that are no longer referenced - * by the ringbuffer to the flushing/inactive lists as appropriate. - */ - spin_lock(&dev_priv->mm.active_list_lock); - while (!list_empty(&request->ring->active_list)) { - struct drm_gem_object *obj; + spin_lock(&file_priv->mm.lock); + list_del(&request->client_list); + request->file_priv = NULL; + spin_unlock(&file_priv->mm.lock); +} + +static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) +{ + while (!list_empty(&ring->request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&ring->request_list, + struct drm_i915_gem_request, + list); + + list_del(&request->list); + i915_gem_request_remove_from_client(request); + kfree(request); + } + + while (!list_empty(&ring->active_list)) { struct drm_i915_gem_object *obj_priv; - obj_priv = list_first_entry(&request->ring->active_list, + obj_priv = list_first_entry(&ring->active_list, struct drm_i915_gem_object, - list); - obj = &obj_priv->base; + ring_list); - /* If the seqno being retired doesn't match the oldest in the - * list, then the oldest in the list must still be newer than - * this seqno. 
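request->client_list is now guarded by the new per-file spinlock rather than struct_mutex, so additions here and removals in i915_gem_request_remove_from_client() cannot race. The locking pattern, restated from the hunk above:

spin_lock(&file_priv->mm.lock);
request->file_priv = file_priv;		/* back-pointer for removal */
list_add_tail(&request->client_list, &file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
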
- */ - if (obj_priv->last_rendering_seqno != request->seqno) - goto out; - -#if WATCH_LRU - DRM_INFO("%s: retire %d moves to inactive list %p\n", - __func__, request->seqno, obj); -#endif - - if (obj->write_domain != 0) - i915_gem_object_move_to_flushing(obj); - else { - /* Take a reference on the object so it won't be - * freed while the spinlock is held. The list - * protection for this spinlock is safe when breaking - * the lock like this since the next thing we do - * is just get the head of the list again. - */ - drm_gem_object_reference(obj); - i915_gem_object_move_to_inactive(obj); - spin_unlock(&dev_priv->mm.active_list_lock); - drm_gem_object_unreference(obj); - spin_lock(&dev_priv->mm.active_list_lock); - } + obj_priv->base.write_domain = 0; + list_del_init(&obj_priv->gpu_write_list); + i915_gem_object_move_to_inactive(&obj_priv->base); } -out: - spin_unlock(&dev_priv->mm.active_list_lock); } -/** - * Returns true if seq1 is later than seq2. - */ -bool -i915_seqno_passed(uint32_t seq1, uint32_t seq2) +void i915_gem_reset(struct drm_device *dev) { - return (int32_t)(seq1 - seq2) >= 0; -} + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int i; -uint32_t -i915_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - return ring->get_gem_seqno(dev, ring); + i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); + i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); + i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); + + /* Remove anything from the flushing lists. The GPU cache is likely + * to be lost on reset along with the data, so simply move the + * lost bo to the inactive list. + */ + while (!list_empty(&dev_priv->mm.flushing_list)) { + obj_priv = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + mm_list); + + obj_priv->base.write_domain = 0; + list_del_init(&obj_priv->gpu_write_list); + i915_gem_object_move_to_inactive(&obj_priv->base); + } + + /* Move everything out of the GPU domains to ensure we do any + * necessary invalidation upon reuse. 
+ */ + list_for_each_entry(obj_priv, + &dev_priv->mm.inactive_list, + mm_list) + { + obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; + } + + /* The fence registers are invalidated so clear them out */ + for (i = 0; i < 16; i++) { + struct drm_i915_fence_reg *reg; + + reg = &dev_priv->fence_regs[i]; + if (!reg->obj) + continue; + + i915_gem_clear_fence_reg(reg->obj); + } } /** @@ -1741,38 +1847,58 @@ i915_gem_retire_requests_ring(struct drm_device *dev, drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; - if (!ring->status_page.page_addr - || list_empty(&ring->request_list)) + if (!ring->status_page.page_addr || + list_empty(&ring->request_list)) return; - seqno = i915_get_gem_seqno(dev, ring); + WARN_ON(i915_verify_lists(dev)); + seqno = ring->get_seqno(dev, ring); while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; - uint32_t retiring_seqno; request = list_first_entry(&ring->request_list, struct drm_i915_gem_request, list); - retiring_seqno = request->seqno; - if (i915_seqno_passed(seqno, retiring_seqno) || - atomic_read(&dev_priv->mm.wedged)) { - i915_gem_retire_request(dev, request); - - list_del(&request->list); - list_del(&request->client_list); - kfree(request); - } else + if (!i915_seqno_passed(seqno, request->seqno)) break; + + trace_i915_gem_request_retire(dev, request->seqno); + + list_del(&request->list); + i915_gem_request_remove_from_client(request); + kfree(request); + } + + /* Move any buffers on the active list that are no longer referenced + * by the ringbuffer to the flushing/inactive lists as appropriate. + */ + while (!list_empty(&ring->active_list)) { + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + obj_priv = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); + + if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) + break; + + obj = &obj_priv->base; + if (obj->write_domain != 0) + i915_gem_object_move_to_flushing(obj); + else + i915_gem_object_move_to_inactive(obj); } if (unlikely (dev_priv->trace_irq_seqno && i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { - ring->user_irq_put(dev, ring); dev_priv->trace_irq_seqno = 0; } + + WARN_ON(i915_verify_lists(dev)); } void @@ -1790,16 +1916,16 @@ i915_gem_retire_requests(struct drm_device *dev) */ list_for_each_entry_safe(obj_priv, tmp, &dev_priv->mm.deferred_free_list, - list) + mm_list) i915_gem_free_object_tail(&obj_priv->base); } i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); } -void +static void i915_gem_retire_work_handler(struct work_struct *work) { drm_i915_private_t *dev_priv; @@ -1809,20 +1935,25 @@ i915_gem_retire_work_handler(struct work_struct *work) mm.retire_work.work); dev = dev_priv->dev; - mutex_lock(&dev->struct_mutex); + /* Come back later if the device is busy... 
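The rewritten i915_gem_retire_requests_ring() above retires in two ordered passes: first complete requests whose seqno has passed, then drain ring->active_list of objects whose last_rendering_seqno has passed. Both lists are kept in submission order, so each loop can stop at the first entry that has not yet completed:

if (!i915_seqno_passed(seqno, request->seqno))
	break;	/* every later request on the list is newer still */

/* ...and likewise for the active-object walk: */
if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
	break;	/* object still busy, so is the rest of the list */
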
*/ + if (!mutex_trylock(&dev->struct_mutex)) { + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + return; + } + i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && (!list_empty(&dev_priv->render_ring.request_list) || - (HAS_BSD(dev) && - !list_empty(&dev_priv->bsd_ring.request_list)))) + !list_empty(&dev_priv->bsd_ring.request_list) || + !list_empty(&dev_priv->blt_ring.request_list))) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, - int interruptible, struct intel_ring_buffer *ring) + bool interruptible, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; u32 ier; @@ -1831,9 +1962,16 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, BUG_ON(seqno == 0); if (atomic_read(&dev_priv->mm.wedged)) - return -EIO; + return -EAGAIN; - if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) { + if (ring->outstanding_lazy_request) { + seqno = i915_add_request(dev, NULL, NULL, ring); + if (seqno == 0) + return -ENOMEM; + } + BUG_ON(seqno == dev_priv->next_seqno); + + if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { if (HAS_PCH_SPLIT(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); else @@ -1852,12 +1990,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (interruptible) ret = wait_event_interruptible(ring->irq_queue, i915_seqno_passed( - ring->get_gem_seqno(dev, ring), seqno) + ring->get_seqno(dev, ring), seqno) || atomic_read(&dev_priv->mm.wedged)); else wait_event(ring->irq_queue, i915_seqno_passed( - ring->get_gem_seqno(dev, ring), seqno) + ring->get_seqno(dev, ring), seqno) || atomic_read(&dev_priv->mm.wedged)); ring->user_irq_put(dev, ring); @@ -1866,11 +2004,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_end(dev, seqno); } if (atomic_read(&dev_priv->mm.wedged)) - ret = -EIO; + ret = -EAGAIN; if (ret && ret != -ERESTARTSYS) - DRM_ERROR("%s returns %d (awaiting %d at %d)\n", - __func__, ret, seqno, ring->get_gem_seqno(dev, ring)); + DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", + __func__, ret, seqno, ring->get_seqno(dev, ring), + dev_priv->next_seqno); /* Directly dispatch request retiring. 
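The retire worker now refuses to sleep on a contended struct_mutex: if someone else holds it, the work simply re-queues itself a second later instead of stalling the shared workqueue. Restated from the hunk above:

if (!mutex_trylock(&dev->struct_mutex)) {
	/* busy: back off and retry in HZ jiffies */
	queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	return;
}
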
While we have the work queue * to handle this, the waiter on a request often wants an associated @@ -1889,27 +2028,48 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, */ static int i915_wait_request(struct drm_device *dev, uint32_t seqno, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { return i915_do_wait_request(dev, seqno, 1, ring); } +static void +i915_gem_flush_ring(struct drm_device *dev, + struct drm_file *file_priv, + struct intel_ring_buffer *ring, + uint32_t invalidate_domains, + uint32_t flush_domains) +{ + ring->flush(dev, ring, invalidate_domains, flush_domains); + i915_gem_process_flushing_list(dev, flush_domains, ring); +} + static void i915_gem_flush(struct drm_device *dev, + struct drm_file *file_priv, uint32_t invalidate_domains, - uint32_t flush_domains) + uint32_t flush_domains, + uint32_t flush_rings) { drm_i915_private_t *dev_priv = dev->dev_private; + if (flush_domains & I915_GEM_DOMAIN_CPU) drm_agp_chipset_flush(dev); - dev_priv->render_ring.flush(dev, &dev_priv->render_ring, - invalidate_domains, - flush_domains); - if (HAS_BSD(dev)) - dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring, - invalidate_domains, - flush_domains); + if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { + if (flush_rings & RING_RENDER) + i915_gem_flush_ring(dev, file_priv, + &dev_priv->render_ring, + invalidate_domains, flush_domains); + if (flush_rings & RING_BSD) + i915_gem_flush_ring(dev, file_priv, + &dev_priv->bsd_ring, + invalidate_domains, flush_domains); + if (flush_rings & RING_BLT) + i915_gem_flush_ring(dev, file_priv, + &dev_priv->blt_ring, + invalidate_domains, flush_domains); + } } /** @@ -1917,7 +2077,8 @@ i915_gem_flush(struct drm_device *dev, * safe to unbind from the GTT or access from the CPU. */ static int -i915_gem_object_wait_rendering(struct drm_gem_object *obj) +i915_gem_object_wait_rendering(struct drm_gem_object *obj, + bool interruptible) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); @@ -1932,13 +2093,11 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) * it. */ if (obj_priv->active) { -#if WATCH_BUF - DRM_INFO("%s: object %p wait for seqno %08x\n", - __func__, obj, obj_priv->last_rendering_seqno); -#endif - ret = i915_wait_request(dev, - obj_priv->last_rendering_seqno, obj_priv->ring); - if (ret != 0) + ret = i915_do_wait_request(dev, + obj_priv->last_rendering_seqno, + interruptible, + obj_priv->ring); + if (ret) return ret; } @@ -1952,14 +2111,10 @@ int i915_gem_object_unbind(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret = 0; -#if WATCH_BUF - DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); - DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); -#endif if (obj_priv->gtt_space == NULL) return 0; @@ -1984,33 +2139,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj) * should be safe and we need to cleanup or else we might * cause memory corruption through use-after-free. 
*/ + if (ret) { + i915_gem_clflush_object(obj); + obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; + } /* release the fence reg _after_ flushing */ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); - if (obj_priv->agp_mem != NULL) { - drm_unbind_agp(obj_priv->agp_mem); - drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); - obj_priv->agp_mem = NULL; - } + drm_unbind_agp(obj_priv->agp_mem); + drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); i915_gem_object_put_pages(obj); BUG_ON(obj_priv->pages_refcount); - if (obj_priv->gtt_space) { - atomic_dec(&dev->gtt_count); - atomic_sub(obj->size, &dev->gtt_memory); + i915_gem_info_remove_gtt(dev_priv, obj->size); + list_del_init(&obj_priv->mm_list); - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; - } - - /* Remove ourselves from the LRU list if present. */ - spin_lock(&dev_priv->mm.active_list_lock); - if (!list_empty(&obj_priv->list)) - list_del_init(&obj_priv->list); - spin_unlock(&dev_priv->mm.active_list_lock); + drm_mm_put_block(obj_priv->gtt_space); + obj_priv->gtt_space = NULL; + obj_priv->gtt_offset = 0; if (i915_gem_object_is_purgeable(obj_priv)) i915_gem_object_truncate(obj); @@ -2020,48 +2169,50 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return ret; } +static int i915_ring_idle(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + if (list_empty(&ring->gpu_write_list)) + return 0; + + i915_gem_flush_ring(dev, NULL, ring, + I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + return i915_wait_request(dev, + i915_gem_next_request_seqno(dev, ring), + ring); +} + int i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; bool lists_empty; - uint32_t seqno1, seqno2; int ret; - spin_lock(&dev_priv->mm.active_list_lock); lists_empty = (list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) || - list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); if (lists_empty) return 0; /* Flush everything onto the inactive list. 
 */
-	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-			&dev_priv->render_ring);
-	if (seqno1 == 0)
-		return -ENOMEM;
-	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+	ret = i915_ring_idle(dev, &dev_priv->render_ring);
+	if (ret)
+		return ret;

-	if (HAS_BSD(dev)) {
-		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
-				&dev_priv->bsd_ring);
-		if (seqno2 == 0)
-			return -ENOMEM;
+	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
+	if (ret)
+		return ret;

-		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
-		if (ret)
-			return ret;
-	}
+	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
+	if (ret)
+		return ret;

-
-	return ret;
+	return 0;
 }

-int
+static int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
 			  gfp_t gfpmask)
 {
@@ -2241,7 +2392,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
 }

-static int i915_find_fence_reg(struct drm_device *dev)
+static int i915_find_fence_reg(struct drm_device *dev,
+			       bool interruptible)
 {
 	struct drm_i915_fence_reg *reg = NULL;
 	struct drm_i915_gem_object *obj_priv = NULL;
@@ -2286,7 +2438,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
 	 * private reference to obj like the other callers of put_fence_reg
 	 * (set_tiling ioctl) do. */
 	drm_gem_object_reference(obj);
-	ret = i915_gem_object_put_fence_reg(obj);
+	ret = i915_gem_object_put_fence_reg(obj, interruptible);
 	drm_gem_object_unreference(obj);
 	if (ret != 0)
 		return ret;
@@ -2308,7 +2460,8 @@ static int i915_find_fence_reg(struct drm_device *dev)
 * and tiling format.
 */
 int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+			      bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2343,7 +2496,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 		break;
 	}

-	ret = i915_find_fence_reg(dev);
+	ret = i915_find_fence_reg(dev, interruptible);
 	if (ret < 0)
 		return ret;
@@ -2421,15 +2574,19 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
+ * @interruptible: whether the wait upon the fence is interruptible
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
 int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
+i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+			      bool interruptible)
 {
 	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_i915_fence_reg *reg;

 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
@@ -2444,20 +2601,23 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 	 * therefore we must wait for any outstanding access to complete
 	 * before clearing the fence.
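	 *
	 * Callers that do not already hold their own reference must take
	 * one across this call, as i915_find_fence_reg() above does:
	 *
	 *	drm_gem_object_reference(obj);
	 *	ret = i915_gem_object_put_fence_reg(obj, interruptible);
	 *	drm_gem_object_unreference(obj);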
*/ - if (!IS_I965G(dev)) { + reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + if (reg->gpu) { int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret != 0) + ret = i915_gem_object_flush_gpu_write_domain(obj, true); + if (ret) return ret; - ret = i915_gem_object_wait_rendering(obj); - if (ret != 0) + ret = i915_gem_object_wait_rendering(obj, interruptible); + if (ret) return ret; + + reg->gpu = false; } i915_gem_object_flush_gtt_write_domain(obj); - i915_gem_clear_fence_reg (obj); + i915_gem_clear_fence_reg(obj); return 0; } @@ -2490,7 +2650,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->size > dev->gtt_total) { + if (obj->size > dev_priv->mm.gtt_total) { DRM_ERROR("Attempting to bind an object larger than the aperture\n"); return -E2BIG; } @@ -2498,19 +2658,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) search_free: free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, obj->size, alignment, 0); - if (free_space != NULL) { + if (free_space != NULL) obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, alignment); - if (obj_priv->gtt_space != NULL) - obj_priv->gtt_offset = obj_priv->gtt_space->start; - } if (obj_priv->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. */ -#if WATCH_LRU - DRM_INFO("%s: GTT full, evicting something\n", __func__); -#endif ret = i915_gem_evict_something(dev, obj->size, alignment); if (ret) return ret; @@ -2518,10 +2672,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) goto search_free; } -#if WATCH_BUF - DRM_INFO("Binding object of size %zd at 0x%08x\n", - obj->size, obj_priv->gtt_offset); -#endif ret = i915_gem_object_get_pages(obj, gfpmask); if (ret) { drm_mm_put_block(obj_priv->gtt_space); @@ -2553,7 +2703,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) obj_priv->agp_mem = drm_agp_bind_pages(dev, obj_priv->pages, obj->size >> PAGE_SHIFT, - obj_priv->gtt_offset, + obj_priv->gtt_space->start, obj_priv->agp_type); if (obj_priv->agp_mem == NULL) { i915_gem_object_put_pages(obj); @@ -2566,11 +2716,10 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) goto search_free; } - atomic_inc(&dev->gtt_count); - atomic_add(obj->size, &dev->gtt_memory); /* keep track of bounds object by adding it to the inactive list */ - list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + i915_gem_info_add_gtt(dev_priv, obj->size); /* Assert that the object is not currently in any GPU domain. As it * wasn't in the GTT, there shouldn't be any way it could have been in @@ -2579,6 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + obj_priv->gtt_offset = obj_priv->gtt_space->start; trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); return 0; @@ -2603,25 +2753,30 @@ i915_gem_clflush_object(struct drm_gem_object *obj) /** Flushes any GPU write domain for the object if it's dirty. 
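 *
 * With this patch the flush is queued on the object's own ring; when
 * @pipelined is false the function additionally waits (interruptibly)
 * for rendering to complete. A typical unpipelined caller (cf.
 * i915_gem_object_set_to_gtt_domain() below) is simply:
 *
 *	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 *	if (ret != 0)
 *		return ret;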
*/ static int -i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) +i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, + bool pipelined) { struct drm_device *dev = obj->dev; uint32_t old_write_domain; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) return 0; /* Queue the GPU write cache flushing we need. */ old_write_domain = obj->write_domain; - i915_gem_flush(dev, 0, obj->write_domain); - if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) - return -ENOMEM; + i915_gem_flush_ring(dev, NULL, + to_intel_bo(obj)->ring, + 0, obj->write_domain); + BUG_ON(obj->write_domain); trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); - return 0; + + if (pipelined) + return 0; + + return i915_gem_object_wait_rendering(obj, true); } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -2665,26 +2820,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) old_write_domain); } -int -i915_gem_object_flush_write_domain(struct drm_gem_object *obj) -{ - int ret = 0; - - switch (obj->write_domain) { - case I915_GEM_DOMAIN_GTT: - i915_gem_object_flush_gtt_write_domain(obj); - break; - case I915_GEM_DOMAIN_CPU: - i915_gem_object_flush_cpu_write_domain(obj); - break; - default: - ret = i915_gem_object_flush_gpu_write_domain(obj); - break; - } - - return ret; -} - /** * Moves a single object to the GTT read, and possibly write domain. * @@ -2702,32 +2837,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) if (obj_priv->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj, false); if (ret != 0) return ret; - /* Wait on any GPU rendering and flushing to occur. */ - ret = i915_gem_object_wait_rendering(obj); - if (ret != 0) - return ret; + i915_gem_object_flush_cpu_write_domain(obj); + + if (write) { + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; + } old_write_domain = obj->write_domain; old_read_domains = obj->read_domains; - /* If we're writing through the GTT domain, then CPU and GPU caches - * will need to be invalidated at next use. - */ - if (write) - obj->read_domains &= I915_GEM_DOMAIN_GTT; - - i915_gem_object_flush_cpu_write_domain(obj); - /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); obj->read_domains |= I915_GEM_DOMAIN_GTT; if (write) { + obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = I915_GEM_DOMAIN_GTT; obj_priv->dirty = 1; } @@ -2744,51 +2875,36 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) * wait, as in modesetting process we're not supposed to be interrupted. */ int -i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) +i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, + bool pipelined) { - struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - uint32_t old_write_domain, old_read_domains; + uint32_t old_read_domains; int ret; /* Not valid to be called on unbound objects. */ if (obj_priv->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj, true); if (ret) return ret; - /* Wait on any GPU rendering and flushing to occur. 
 */
-	if (obj_priv->active) {
-#if WATCH_BUF
-		DRM_INFO("%s: object %p wait for seqno %08x\n",
-			  __func__, obj, obj_priv->last_rendering_seqno);
-#endif
-		ret = i915_do_wait_request(dev,
-					   obj_priv->last_rendering_seqno,
-					   0,
-					   obj_priv->ring);
-		if (ret != 0)
+	/* Currently, we are always called from a non-interruptible context. */
+	if (!pipelined) {
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
 			return ret;
 	}

 	i915_gem_object_flush_cpu_write_domain(obj);

-	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
-
-	/* It should now be out of any other write domains, and we can update
-	 * the domain values for our changes.
-	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-	obj->read_domains = I915_GEM_DOMAIN_GTT;
-	obj->write_domain = I915_GEM_DOMAIN_GTT;
-	obj_priv->dirty = 1;
+	obj->read_domains |= I915_GEM_DOMAIN_GTT;

 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    old_write_domain);
+					    obj->write_domain);

 	return 0;
 }
@@ -2805,12 +2921,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;

-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
-	/* Wait on any GPU rendering and flushing to occur. */
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
 	if (ret != 0)
 		return ret;
@@ -2821,6 +2932,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);

+	if (write) {
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
@@ -2840,7 +2957,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 * need to be invalidated at next use.
 	 */
 	if (write) {
-		obj->read_domains &= I915_GEM_DOMAIN_CPU;
+		obj->read_domains = I915_GEM_DOMAIN_CPU;
 		obj->write_domain = I915_GEM_DOMAIN_CPU;
 	}
@@ -2963,26 +3080,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 *	drm_agp_chipset_flush
 */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+				  struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 	uint32_t old_read_domains;

-	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
-
 	intel_mark_busy(dev, obj);

-#if WATCH_BUF
-	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
-		 __func__, obj,
-		 obj->read_domains, obj->pending_read_domains,
-		 obj->write_domain, obj->pending_write_domain);
-#endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
@@ -3009,13 +3118,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 	 * stale data. That is, any new read domains.
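	 *
	 * Worked example: an object last written by the CPU and now
	 * wanted as a render-ring texture has write_domain ==
	 * I915_GEM_DOMAIN_CPU and pending_read_domains ==
	 * I915_GEM_DOMAIN_RENDER, so we compute flush_domains |= CPU
	 * (triggering the clflush below) and invalidate_domains |= RENDER.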
*/ invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; - if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { -#if WATCH_BUF - DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", - __func__, flush_domains, invalidate_domains); -#endif + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) i915_gem_clflush_object(obj); - } old_read_domains = obj->read_domains; @@ -3029,21 +3133,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) obj->pending_write_domain = obj->write_domain; obj->read_domains = obj->pending_read_domains; - if (flush_domains & I915_GEM_GPU_DOMAINS) { - if (obj_priv->ring == &dev_priv->render_ring) - dev_priv->flush_rings |= FLUSH_RENDER_RING; - else if (obj_priv->ring == &dev_priv->bsd_ring) - dev_priv->flush_rings |= FLUSH_BSD_RING; - } - dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; -#if WATCH_BUF - DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", - __func__, - obj->read_domains, obj->write_domain, - dev->invalidate_domains, dev->flush_domains); -#endif + if (flush_domains & I915_GEM_GPU_DOMAINS) + dev_priv->mm.flush_rings |= obj_priv->ring->id; + if (invalidate_domains & I915_GEM_GPU_DOMAINS) + dev_priv->mm.flush_rings |= ring->id; trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -3106,12 +3201,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, if (offset == 0 && size == obj->size) return i915_gem_object_set_to_cpu_domain(obj, 0); - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret) - return ret; - - /* Wait on any GPU rendering and flushing to occur. */ - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj, false); if (ret != 0) return ret; i915_gem_object_flush_gtt_write_domain(obj); @@ -3164,66 +3254,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, * Pin an object to the GTT and evaluate the relocations landing in it. */ static int -i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, - struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry, - struct drm_i915_gem_relocation_entry *relocs) +i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int i, ret; - void __iomem *reloc_page; - bool need_fence; + struct drm_i915_gem_relocation_entry __user *user_relocs; + struct drm_gem_object *target_obj = NULL; + uint32_t target_handle = 0; + int i, ret = 0; - need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj_priv->tiling_mode != I915_TILING_NONE; - - /* Check fence reg constraints and rebind if necessary */ - if (need_fence && - !i915_gem_object_fence_offset_ok(obj, - obj_priv->tiling_mode)) { - ret = i915_gem_object_unbind(obj); - if (ret) - return ret; - } - - /* Choose the GTT offset for our buffer and put it there. */ - ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); - if (ret) - return ret; - - /* - * Pre-965 chips need a fence register set up in order to - * properly handle blits to/from tiled surfaces. 
- */ - if (need_fence) { - ret = i915_gem_object_get_fence_reg(obj); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; - } - } - - entry->offset = obj_priv->gtt_offset; - - /* Apply the relocations, using the GTT aperture to avoid cache - * flushing requirements. - */ + user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; for (i = 0; i < entry->relocation_count; i++) { - struct drm_i915_gem_relocation_entry *reloc= &relocs[i]; - struct drm_gem_object *target_obj; - struct drm_i915_gem_object *target_obj_priv; - uint32_t reloc_val, reloc_offset; - uint32_t __iomem *reloc_entry; + struct drm_i915_gem_relocation_entry reloc; + uint32_t target_offset; - target_obj = drm_gem_object_lookup(obj->dev, file_priv, - reloc->target_handle); - if (target_obj == NULL) { - i915_gem_object_unpin(obj); - return -ENOENT; + if (__copy_from_user_inatomic(&reloc, + user_relocs+i, + sizeof(reloc))) { + ret = -EFAULT; + break; } - target_obj_priv = to_intel_bo(target_obj); + + if (reloc.target_handle != target_handle) { + drm_gem_object_unreference(target_obj); + + target_obj = drm_gem_object_lookup(dev, file_priv, + reloc.target_handle); + if (target_obj == NULL) { + ret = -ENOENT; + break; + } + + target_handle = reloc.target_handle; + } + target_offset = to_intel_bo(target_obj)->gtt_offset; #if WATCH_RELOC DRM_INFO("%s: obj %p offset %08x target %d " @@ -3231,146 +3297,203 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, "presumed %08x delta %08x\n", __func__, obj, - (int) reloc->offset, - (int) reloc->target_handle, - (int) reloc->read_domains, - (int) reloc->write_domain, - (int) target_obj_priv->gtt_offset, - (int) reloc->presumed_offset, - reloc->delta); + (int) reloc.offset, + (int) reloc.target_handle, + (int) reloc.read_domains, + (int) reloc.write_domain, + (int) target_offset, + (int) reloc.presumed_offset, + reloc.delta); #endif /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. 
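 *
 * Note that the lookup above is cached across loop iterations:
 * consecutive relocations against the same buffer are the common
 * case, so drm_gem_object_lookup() is only repeated when
 * reloc.target_handle changes.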
*/ - if (target_obj_priv->gtt_space == NULL) { + if (target_offset == 0) { DRM_ERROR("No GTT space found for object %d\n", - reloc->target_handle); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + reloc.target_handle); + ret = -EINVAL; + break; } /* Validate that the target is in a valid r/w GPU domain */ - if (reloc->write_domain & (reloc->write_domain - 1)) { + if (reloc.write_domain & (reloc.write_domain - 1)) { DRM_ERROR("reloc with multiple write domains: " "obj %p target %d offset %d " "read %08x write %08x", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.offset, + reloc.read_domains, + reloc.write_domain); + ret = -EINVAL; + break; } - if (reloc->write_domain & I915_GEM_DOMAIN_CPU || - reloc->read_domains & I915_GEM_DOMAIN_CPU) { + if (reloc.write_domain & I915_GEM_DOMAIN_CPU || + reloc.read_domains & I915_GEM_DOMAIN_CPU) { DRM_ERROR("reloc with read/write CPU domains: " "obj %p target %d offset %d " "read %08x write %08x", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.offset, + reloc.read_domains, + reloc.write_domain); + ret = -EINVAL; + break; } - if (reloc->write_domain && target_obj->pending_write_domain && - reloc->write_domain != target_obj->pending_write_domain) { + if (reloc.write_domain && target_obj->pending_write_domain && + reloc.write_domain != target_obj->pending_write_domain) { DRM_ERROR("Write domain conflict: " "obj %p target %d offset %d " "new %08x old %08x\n", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->write_domain, + obj, reloc.target_handle, + (int) reloc.offset, + reloc.write_domain, target_obj->pending_write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + ret = -EINVAL; + break; } - target_obj->pending_read_domains |= reloc->read_domains; - target_obj->pending_write_domain |= reloc->write_domain; + target_obj->pending_read_domains |= reloc.read_domains; + target_obj->pending_write_domain |= reloc.write_domain; /* If the relocation already has the right value in it, no * more work needs to be done. */ - if (target_obj_priv->gtt_offset == reloc->presumed_offset) { - drm_gem_object_unreference(target_obj); + if (target_offset == reloc.presumed_offset) continue; - } /* Check that the relocation address is valid... */ - if (reloc->offset > obj->size - 4) { + if (reloc.offset > obj->base.size - 4) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->offset, (int) obj->size); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.offset, (int) obj->base.size); + ret = -EINVAL; + break; } - if (reloc->offset & 3) { + if (reloc.offset & 3) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", - obj, reloc->target_handle, - (int) reloc->offset); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.offset); + ret = -EINVAL; + break; } /* and points to somewhere within the target object. 
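 *
 * Taken together, the bounds checks in this loop amount to, in
 * effect (sketch):
 *
 *	if (reloc.offset > obj->base.size - 4 ||
 *	    (reloc.offset & 3) ||
 *	    reloc.delta >= target_obj->size)
 *		return -EINVAL;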
*/ - if (reloc->delta >= target_obj->size) { + if (reloc.delta >= target_obj->size) { DRM_ERROR("Relocation beyond target object bounds: " "obj %p target %d delta %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->delta, (int) target_obj->size); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.delta, (int) target_obj->size); + ret = -EINVAL; + break; } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret != 0) { - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + reloc.delta += target_offset; + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { + uint32_t page_offset = reloc.offset & ~PAGE_MASK; + char *vaddr; + + vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]); + *(uint32_t *)(vaddr + page_offset) = reloc.delta; + kunmap_atomic(vaddr); + } else { + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + + ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); + if (ret) + break; + + /* Map the page containing the relocation we're going to perform. */ + reloc.offset += obj->gtt_offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + reloc.offset & PAGE_MASK); + reloc_entry = (uint32_t __iomem *) + (reloc_page + (reloc.offset & ~PAGE_MASK)); + iowrite32(reloc.delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page); } - /* Map the page containing the relocation we're going to - * perform. - */ - reloc_offset = obj_priv->gtt_offset + reloc->offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - (reloc_offset & - ~(PAGE_SIZE - 1))); - reloc_entry = (uint32_t __iomem *)(reloc_page + - (reloc_offset & (PAGE_SIZE - 1))); - reloc_val = target_obj_priv->gtt_offset + reloc->delta; - -#if WATCH_BUF - DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", - obj, (unsigned int) reloc->offset, - readl(reloc_entry), reloc_val); -#endif - writel(reloc_val, reloc_entry); - io_mapping_unmap_atomic(reloc_page); - - /* The updated presumed offset for this entry will be - * copied back out to the user. 
- */ - reloc->presumed_offset = target_obj_priv->gtt_offset; - - drm_gem_object_unreference(target_obj); + /* and update the user's relocation entry */ + reloc.presumed_offset = target_offset; + if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, + &reloc.presumed_offset, + sizeof(reloc.presumed_offset))) { + ret = -EFAULT; + break; + } + } + + drm_gem_object_unreference(target_obj); + return ret; +} + +static int +i915_gem_execbuffer_pin(struct drm_device *dev, + struct drm_file *file, + struct drm_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, i, retry; + + /* attempt to pin all of the buffers into the GTT */ + for (retry = 0; retry < 2; retry++) { + ret = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; + struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]); + bool need_fence = + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + + /* Check fence reg constraints and rebind if necessary */ + if (need_fence && + !i915_gem_object_fence_offset_ok(&obj->base, + obj->tiling_mode)) { + ret = i915_gem_object_unbind(&obj->base); + if (ret) + break; + } + + ret = i915_gem_object_pin(&obj->base, entry->alignment); + if (ret) + break; + + /* + * Pre-965 chips need a fence register set up in order + * to properly handle blits to/from tiled surfaces. + */ + if (need_fence) { + ret = i915_gem_object_get_fence_reg(&obj->base, true); + if (ret) { + i915_gem_object_unpin(&obj->base); + break; + } + + dev_priv->fence_regs[obj->fence_reg].gpu = true; + } + + entry->offset = obj->gtt_offset; + } + + while (i--) + i915_gem_object_unpin(object_list[i]); + + if (ret == 0) + break; + + if (ret != -ENOSPC || retry) + return ret; + + ret = i915_gem_evict_everything(dev); + if (ret) + return ret; } -#if WATCH_BUF - if (0) - i915_gem_dump_object(obj, 128, __func__, ~0); -#endif return 0; } @@ -3385,113 +3508,55 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, * relatively low latency when blocking on a particular request to finish. */ static int -i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) +i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; - int ret = 0; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; unsigned long recent_enough = jiffies - msecs_to_jiffies(20); + struct drm_i915_gem_request *request; + struct intel_ring_buffer *ring = NULL; + u32 seqno = 0; + int ret; - mutex_lock(&dev->struct_mutex); - while (!list_empty(&i915_file_priv->mm.request_list)) { - struct drm_i915_gem_request *request; - - request = list_first_entry(&i915_file_priv->mm.request_list, - struct drm_i915_gem_request, - client_list); - + spin_lock(&file_priv->mm.lock); + list_for_each_entry(request, &file_priv->mm.request_list, client_list) { if (time_after_eq(request->emitted_jiffies, recent_enough)) break; - ret = i915_wait_request(dev, request->seqno, request->ring); - if (ret != 0) - break; + ring = request->ring; + seqno = request->seqno; } - mutex_unlock(&dev->struct_mutex); + spin_unlock(&file_priv->mm.lock); + + if (seqno == 0) + return 0; + + ret = 0; + if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { + /* And wait for the seqno passing without holding any locks and + * causing extra latency for others. 
This is safe as the irq + * generation is designed to be run atomically and so is + * lockless. + */ + ring->user_irq_get(dev, ring); + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(dev, ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); + ring->user_irq_put(dev, ring); + + if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) + ret = -EIO; + } + + if (ret == 0) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); return ret; } static int -i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, - uint32_t buffer_count, - struct drm_i915_gem_relocation_entry **relocs) -{ - uint32_t reloc_count = 0, reloc_index = 0, i; - int ret; - - *relocs = NULL; - for (i = 0; i < buffer_count; i++) { - if (reloc_count + exec_list[i].relocation_count < reloc_count) - return -EINVAL; - reloc_count += exec_list[i].relocation_count; - } - - *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); - if (*relocs == NULL) { - DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); - return -ENOMEM; - } - - for (i = 0; i < buffer_count; i++) { - struct drm_i915_gem_relocation_entry __user *user_relocs; - - user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - - ret = copy_from_user(&(*relocs)[reloc_index], - user_relocs, - exec_list[i].relocation_count * - sizeof(**relocs)); - if (ret != 0) { - drm_free_large(*relocs); - *relocs = NULL; - return -EFAULT; - } - - reloc_index += exec_list[i].relocation_count; - } - - return 0; -} - -static int -i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, - uint32_t buffer_count, - struct drm_i915_gem_relocation_entry *relocs) -{ - uint32_t reloc_count = 0, i; - int ret = 0; - - if (relocs == NULL) - return 0; - - for (i = 0; i < buffer_count; i++) { - struct drm_i915_gem_relocation_entry __user *user_relocs; - int unwritten; - - user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - - unwritten = copy_to_user(user_relocs, - &relocs[reloc_count], - exec_list[i].relocation_count * - sizeof(*relocs)); - - if (unwritten) { - ret = -EFAULT; - goto err; - } - - reloc_count += exec_list[i].relocation_count; - } - -err: - drm_free_large(relocs); - - return ret; -} - -static int -i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, - uint64_t exec_offset) +i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, + uint64_t exec_offset) { uint32_t exec_start, exec_len; @@ -3508,44 +3573,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, } static int -i915_gem_wait_for_pending_flip(struct drm_device *dev, - struct drm_gem_object **object_list, - int count) +validate_exec_list(struct drm_i915_gem_exec_object2 *exec, + int count) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - DEFINE_WAIT(wait); - int i, ret = 0; + int i; - for (;;) { - prepare_to_wait(&dev_priv->pending_flip_queue, - &wait, TASK_INTERRUPTIBLE); - for (i = 0; i < count; i++) { - obj_priv = to_intel_bo(object_list[i]); - if (atomic_read(&obj_priv->pending_flip) > 0) - break; - } - if (i == count) - break; + for (i = 0; i < count; i++) { + char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; + size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); - if (!signal_pending(current)) { - mutex_unlock(&dev->struct_mutex); - schedule(); - mutex_lock(&dev->struct_mutex); - continue; - } - ret = -ERESTARTSYS; - break; + if (!access_ok(VERIFY_READ, ptr, length)) + return 
-EFAULT; + + /* we may also need to update the presumed offsets */ + if (!access_ok(VERIFY_WRITE, ptr, length)) + return -EFAULT; + + if (fault_in_pages_readable(ptr, length)) + return -EFAULT; } - finish_wait(&dev_priv->pending_flip_queue, &wait); - return ret; + return 0; } - -int +static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv, + struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_exec_object2 *exec_list) { @@ -3554,26 +3607,47 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_relocation_entry *relocs = NULL; - int ret = 0, ret2, i, pinned = 0; + struct drm_i915_gem_request *request = NULL; + int ret, i, flips; uint64_t exec_offset; - uint32_t seqno, flush_domains, reloc_index; - int pin_tries, flips; struct intel_ring_buffer *ring = NULL; + ret = i915_gem_check_is_wedged(dev); + if (ret) + return ret; + + ret = validate_exec_list(exec_list, args->buffer_count); + if (ret) + return ret; + #if WATCH_EXEC DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", (int) args->buffers_ptr, args->buffer_count, args->batch_len); #endif - if (args->flags & I915_EXEC_BSD) { + switch (args->flags & I915_EXEC_RING_MASK) { + case I915_EXEC_DEFAULT: + case I915_EXEC_RENDER: + ring = &dev_priv->render_ring; + break; + case I915_EXEC_BSD: if (!HAS_BSD(dev)) { - DRM_ERROR("execbuf with wrong flag\n"); + DRM_ERROR("execbuf with invalid ring (BSD)\n"); return -EINVAL; } ring = &dev_priv->bsd_ring; - } else { - ring = &dev_priv->render_ring; + break; + case I915_EXEC_BLT: + if (!HAS_BLT(dev)) { + DRM_ERROR("execbuf with invalid ring (BLT)\n"); + return -EINVAL; + } + ring = &dev_priv->blt_ring; + break; + default: + DRM_ERROR("execbuf with unknown ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); + return -EINVAL; } if (args->buffer_count < 1) { @@ -3608,21 +3682,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } } - ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, - &relocs); - if (ret != 0) - goto pre_mutex_err; - - mutex_lock(&dev->struct_mutex); - - i915_verify_inactive(dev, __FILE__, __LINE__); - - if (atomic_read(&dev_priv->mm.wedged)) { - mutex_unlock(&dev->struct_mutex); - ret = -EIO; + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) { + ret = -ENOMEM; goto pre_mutex_err; } + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto pre_mutex_err; + if (dev_priv->mm.suspended) { mutex_unlock(&dev->struct_mutex); ret = -EBUSY; @@ -3630,9 +3699,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Look up object handles */ - flips = 0; for (i = 0; i < args->buffer_count; i++) { - object_list[i] = drm_gem_object_lookup(dev, file_priv, + object_list[i] = drm_gem_object_lookup(dev, file, exec_list[i].handle); if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", @@ -3653,78 +3721,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } obj_priv->in_execbuffer = true; - flips += atomic_read(&obj_priv->pending_flip); } - if (flips > 0) { - ret = i915_gem_wait_for_pending_flip(dev, object_list, - args->buffer_count); + /* Move the objects en-masse into the GTT, evicting if necessary. 
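 *
 * i915_gem_execbuffer_pin() above implements this as a two-pass
 * loop: if any object fails to pin with -ENOSPC, everything is
 * unpinned, the entire aperture is evicted and the pass is retried
 * once before giving up (sketch):
 *
 *	for (retry = 0; retry < 2; retry++) {
 *		...pin each buffer, breaking out on error...
 *		if (ret != -ENOSPC || retry)
 *			return ret;
 *		ret = i915_gem_evict_everything(dev);
 *	}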
*/ + ret = i915_gem_execbuffer_pin(dev, file, + object_list, exec_list, + args->buffer_count); + if (ret) + goto err; + + /* The objects are in their final locations, apply the relocations. */ + for (i = 0; i < args->buffer_count; i++) { + struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]); if (ret) goto err; } - /* Pin and relocate */ - for (pin_tries = 0; ; pin_tries++) { - ret = 0; - reloc_index = 0; - - for (i = 0; i < args->buffer_count; i++) { - object_list[i]->pending_read_domains = 0; - object_list[i]->pending_write_domain = 0; - ret = i915_gem_object_pin_and_relocate(object_list[i], - file_priv, - &exec_list[i], - &relocs[reloc_index]); - if (ret) - break; - pinned = i + 1; - reloc_index += exec_list[i].relocation_count; - } - /* success */ - if (ret == 0) - break; - - /* error other than GTT full, or we've already tried again */ - if (ret != -ENOSPC || pin_tries >= 1) { - if (ret != -ERESTARTSYS) { - unsigned long long total_size = 0; - int num_fences = 0; - for (i = 0; i < args->buffer_count; i++) { - obj_priv = to_intel_bo(object_list[i]); - - total_size += object_list[i]->size; - num_fences += - exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && - obj_priv->tiling_mode != I915_TILING_NONE; - } - DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", - pinned+1, args->buffer_count, - total_size, num_fences, - ret); - DRM_ERROR("%d objects [%d pinned], " - "%d object bytes [%d pinned], " - "%d/%d gtt bytes\n", - atomic_read(&dev->object_count), - atomic_read(&dev->pin_count), - atomic_read(&dev->object_memory), - atomic_read(&dev->pin_memory), - atomic_read(&dev->gtt_memory), - dev->gtt_total); - } - goto err; - } - - /* unpin all of our buffers */ - for (i = 0; i < pinned; i++) - i915_gem_object_unpin(object_list[i]); - pinned = 0; - - /* evict everyone we can from the aperture */ - ret = i915_gem_evict_everything(dev); - if (ret && ret != -ENOSPC) - goto err; - } - /* Set the pending read domains for the batch buffer to COMMAND */ batch_obj = object_list[args->buffer_count-1]; if (batch_obj->pending_write_domain) { @@ -3734,33 +3749,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - /* Sanity check the batch buffer, prior to moving objects */ - exec_offset = exec_list[args->buffer_count - 1].offset; - ret = i915_gem_check_execbuffer (args, exec_offset); + /* Sanity check the batch buffer */ + exec_offset = to_intel_bo(batch_obj)->gtt_offset; + ret = i915_gem_check_execbuffer(args, exec_offset); if (ret != 0) { DRM_ERROR("execbuf with invalid offset/length\n"); goto err; } - i915_verify_inactive(dev, __FILE__, __LINE__); - /* Zero the global flush/invalidate flags. 
These * will be modified as new domains are computed * for each object */ dev->invalidate_domains = 0; dev->flush_domains = 0; - dev_priv->flush_rings = 0; + dev_priv->mm.flush_rings = 0; for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; /* Compute new gpu domains and update invalidate/flush */ - i915_gem_object_set_to_gpu_domain(obj); + i915_gem_object_set_to_gpu_domain(obj, ring); } - i915_verify_inactive(dev, __FILE__, __LINE__); - if (dev->invalidate_domains | dev->flush_domains) { #if WATCH_EXEC DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", @@ -3768,38 +3779,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dev->invalidate_domains, dev->flush_domains); #endif - i915_gem_flush(dev, + i915_gem_flush(dev, file, dev->invalidate_domains, - dev->flush_domains); - if (dev_priv->flush_rings & FLUSH_RENDER_RING) - (void)i915_add_request(dev, file_priv, - dev->flush_domains, - &dev_priv->render_ring); - if (dev_priv->flush_rings & FLUSH_BSD_RING) - (void)i915_add_request(dev, file_priv, - dev->flush_domains, - &dev_priv->bsd_ring); + dev->flush_domains, + dev_priv->mm.flush_rings); } for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain = obj->write_domain; - obj->write_domain = obj->pending_write_domain; - if (obj->write_domain) - list_move_tail(&obj_priv->gpu_write_list, - &dev_priv->mm.gpu_write_list); - else - list_del_init(&obj_priv->gpu_write_list); - trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); } - i915_verify_inactive(dev, __FILE__, __LINE__); - #if WATCH_COHERENCY for (i = 0; i < args->buffer_count; i++) { i915_gem_object_check_coherency(object_list[i], @@ -3814,9 +3808,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ~0); #endif + /* Check for any pending flips. As we only maintain a flip queue depth + * of 1, we can simply insert a WAIT for the next display flip prior + * to executing the batch and avoid stalling the CPU. + */ + flips = 0; + for (i = 0; i < args->buffer_count; i++) { + if (object_list[i]->write_domain) + flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); + } + if (flips) { + int plane, flip_mask; + + for (plane = 0; flips >> plane; plane++) { + if (((flips >> plane) & 1) == 0) + continue; + + if (plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, + MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(dev, ring, MI_NOOP); + intel_ring_advance(dev, ring); + } + } + /* Exec the batchbuffer */ ret = ring->dispatch_gem_execbuffer(dev, ring, args, - cliprects, exec_offset); + cliprects, exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; @@ -3826,38 +3849,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * Ensure that the commands in the batch buffer are * finished before the interrupt fires */ - flush_domains = i915_retire_commands(dev, ring); + i915_retire_commands(dev, ring); - i915_verify_inactive(dev, __FILE__, __LINE__); - - /* - * Get a seqno representing the execution of the current buffer, - * which we can wait on. We would like to mitigate these interrupts, - * likely by only creating seqnos occasionally (so that we have - * *some* interrupts representing completion of buffers that we can - * wait on when trying to clear up gtt space). 
- */ - seqno = i915_add_request(dev, file_priv, flush_domains, ring); - BUG_ON(seqno == 0); for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - obj_priv = to_intel_bo(obj); - i915_gem_object_move_to_active(obj, seqno, ring); -#if WATCH_LRU - DRM_INFO("%s: move to exec list %p\n", __func__, obj); -#endif + i915_gem_object_move_to_active(obj, ring); + if (obj->write_domain) + list_move_tail(&to_intel_bo(obj)->gpu_write_list, + &ring->gpu_write_list); } -#if WATCH_LRU - i915_dump_lru(dev, __func__); -#endif - i915_verify_inactive(dev, __FILE__, __LINE__); + i915_add_request(dev, file, request, ring); + request = NULL; err: - for (i = 0; i < pinned; i++) - i915_gem_object_unpin(object_list[i]); - for (i = 0; i < args->buffer_count; i++) { if (object_list[i]) { obj_priv = to_intel_bo(object_list[i]); @@ -3869,22 +3875,9 @@ err: mutex_unlock(&dev->struct_mutex); pre_mutex_err: - /* Copy the updated relocations out regardless of current error - * state. Failure to update the relocs would mean that the next - * time userland calls execbuf, it would do so with presumed offset - * state that didn't match the actual object state. - */ - ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, - relocs); - if (ret2 != 0) { - DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); - - if (ret == 0) - ret = ret2; - } - drm_free_large(object_list); kfree(cliprects); + kfree(request); return ret; } @@ -3941,7 +3934,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; exec2_list[i].alignment = exec_list[i].alignment; exec2_list[i].offset = exec_list[i].offset; - if (!IS_I965G(dev)) + if (INTEL_INFO(dev)->gen < 4) exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; else exec2_list[i].flags = 0; @@ -4038,12 +4031,12 @@ int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); - - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); if (obj_priv->gtt_space != NULL) { if (alignment == 0) @@ -4071,14 +4064,13 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) * remove it from the inactive list */ if (obj_priv->pin_count == 1) { - atomic_inc(&dev->pin_count); - atomic_add(obj->size, &dev->pin_memory); - if (!obj_priv->active && - (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) - list_del_init(&obj_priv->list); + i915_gem_info_add_pin(dev_priv, obj->size); + if (!obj_priv->active) + list_move_tail(&obj_priv->mm_list, + &dev_priv->mm.pinned_list); } - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); return 0; } @@ -4089,7 +4081,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); obj_priv->pin_count--; BUG_ON(obj_priv->pin_count < 0); BUG_ON(obj_priv->gtt_space == NULL); @@ -4099,14 +4091,12 @@ i915_gem_object_unpin(struct drm_gem_object *obj) * the inactive list */ if (obj_priv->pin_count == 0) { - if (!obj_priv->active && - (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) - list_move_tail(&obj_priv->list, + if (!obj_priv->active) + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - 
atomic_dec(&dev->pin_count); - atomic_sub(obj->size, &dev->pin_memory); + i915_gem_info_remove_pin(dev_priv, obj->size); } - i915_verify_inactive(dev, __FILE__, __LINE__); + WARN_ON(i915_verify_lists(dev)); } int @@ -4118,41 +4108,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", - args->handle); - mutex_unlock(&dev->struct_mutex); - return -ENOENT; + ret = -ENOENT; + goto unlock; } obj_priv = to_intel_bo(obj); if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } obj_priv->user_pin_count++; obj_priv->pin_filp = file_priv; if (obj_priv->user_pin_count == 1) { ret = i915_gem_object_pin(obj, args->alignment); - if (ret != 0) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } /* XXX - flush the CPU caches for pinned objects @@ -4160,10 +4145,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, */ i915_gem_object_flush_cpu_write_domain(obj); args->offset = obj_priv->gtt_offset; +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } int @@ -4173,24 +4159,24 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_pin *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; + int ret; - mutex_lock(&dev->struct_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", - args->handle); - mutex_unlock(&dev->struct_mutex); - return -ENOENT; + ret = -ENOENT; + goto unlock; } - obj_priv = to_intel_bo(obj); + if (obj_priv->pin_filp != file_priv) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } obj_priv->user_pin_count--; if (obj_priv->user_pin_count == 0) { @@ -4198,9 +4184,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, i915_gem_object_unpin(obj); } +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } int @@ -4210,22 +4198,24 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_busy *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", - args->handle); - return -ENOENT; + ret = -ENOENT; + goto unlock; } - - mutex_lock(&dev->struct_mutex); + obj_priv = to_intel_bo(obj); /* Count all active objects as busy, even if they are currently not used * by the gpu. 
Users of this interface expect objects to eventually * become non-busy without any further actions, therefore emit any * necessary flushes here. */ - obj_priv = to_intel_bo(obj); args->busy = obj_priv->active; if (args->busy) { /* Unconditionally flush objects, even when the gpu still uses this @@ -4233,10 +4223,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * use this buffer rather sooner than later, so issuing the required * flush earlier is beneficial. */ - if (obj->write_domain) { - i915_gem_flush(dev, 0, obj->write_domain); - (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring); - } + if (obj->write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(dev, file_priv, + obj_priv->ring, + 0, obj->write_domain); /* Update the active list for the hardware's current position. * Otherwise this only updates on a delayed timer or when irqs @@ -4249,8 +4239,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, } drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } int @@ -4267,6 +4258,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_madvise *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; + int ret; switch (args->madv) { case I915_MADV_DONTNEED: @@ -4276,22 +4268,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, return -EINVAL; } + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", - args->handle); - return -ENOENT; + ret = -ENOENT; + goto unlock; } - - mutex_lock(&dev->struct_mutex); obj_priv = to_intel_bo(obj); if (obj_priv->pin_count) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - - DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } if (obj_priv->madv != __I915_MADV_PURGED) @@ -4304,15 +4294,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, args->retained = obj_priv->madv != __I915_MADV_PURGED; +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, size_t size) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; obj = kzalloc(sizeof(*obj), GFP_KERNEL); @@ -4324,18 +4316,19 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, return NULL; } + i915_gem_info_add_obj(dev_priv, size); + obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; obj->agp_type = AGP_USER_MEMORY; obj->base.driver_private = NULL; obj->fence_reg = I915_FENCE_REG_NONE; - INIT_LIST_HEAD(&obj->list); + INIT_LIST_HEAD(&obj->mm_list); + INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; - trace_i915_gem_object_create(&obj->base); - return &obj->base; } @@ -4355,7 +4348,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj) ret = i915_gem_object_unbind(obj); if (ret == -ERESTARTSYS) { - list_move(&obj_priv->list, + list_move(&obj_priv->mm_list, &dev_priv->mm.deferred_free_list); return; } @@ -4364,6 +4357,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj) i915_gem_free_mmap_offset(obj); drm_gem_object_release(obj); + i915_gem_info_remove_obj(dev_priv, obj->size); 
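	/* The i915_gem_info_* bookkeeping helpers used here and above are
	 * added elsewhere in this patch (not shown); a minimal sketch of
	 * their assumed shape, with the counters protected by struct_mutex
	 * rather than the old atomic_t pairs:
	 *
	 *	static inline void
	 *	i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
	 *				 size_t size)
	 *	{
	 *		dev_priv->mm.object_count--;
	 *		dev_priv->mm.object_memory -= size;
	 *	}
	 */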
kfree(obj_priv->page_cpu_valid); kfree(obj_priv->bit_17); @@ -4394,10 +4388,7 @@ i915_gem_idle(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - if (dev_priv->mm.suspended || - (dev_priv->render_ring.gem_object == NULL) || - (HAS_BSD(dev) && - dev_priv->bsd_ring.gem_object == NULL)) { + if (dev_priv->mm.suspended) { mutex_unlock(&dev->struct_mutex); return 0; } @@ -4422,7 +4413,7 @@ i915_gem_idle(struct drm_device *dev) * And not confound mm.suspended! */ dev_priv->mm.suspended = 1; - del_timer(&dev_priv->hangcheck_timer); + del_timer_sync(&dev_priv->hangcheck_timer); i915_kernel_lost_context(dev); i915_gem_cleanup_ringbuffer(dev); @@ -4502,36 +4493,34 @@ i915_gem_init_ringbuffer(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int ret; - dev_priv->render_ring = render_ring; - - if (!I915_NEED_GFX_HWS(dev)) { - dev_priv->render_ring.status_page.page_addr - = dev_priv->status_page_dmah->vaddr; - memset(dev_priv->render_ring.status_page.page_addr, - 0, PAGE_SIZE); - } - if (HAS_PIPE_CONTROL(dev)) { ret = i915_gem_init_pipe_control(dev); if (ret) return ret; } - ret = intel_init_ring_buffer(dev, &dev_priv->render_ring); + ret = intel_init_render_ring_buffer(dev); if (ret) goto cleanup_pipe_control; if (HAS_BSD(dev)) { - dev_priv->bsd_ring = bsd_ring; - ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring); + ret = intel_init_bsd_ring_buffer(dev); if (ret) goto cleanup_render_ring; } + if (HAS_BLT(dev)) { + ret = intel_init_blt_ring_buffer(dev); + if (ret) + goto cleanup_bsd_ring; + } + dev_priv->next_seqno = 1; return 0; +cleanup_bsd_ring: + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); cleanup_render_ring: intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); cleanup_pipe_control: @@ -4546,8 +4535,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); if (HAS_PIPE_CONTROL(dev)) i915_gem_cleanup_pipe_control(dev); } @@ -4576,15 +4565,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, return ret; } - spin_lock(&dev_priv->mm.active_list_lock); + BUG_ON(!list_empty(&dev_priv->mm.active_list)); BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); - BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); - spin_unlock(&dev_priv->mm.active_list_lock); - + BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); + BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); - BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); + BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); + BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -4626,28 +4615,34 @@ i915_gem_lastclose(struct drm_device *dev) DRM_ERROR("failed to idle hardware: %d\n", ret); } +static void +init_ring_lists(struct intel_ring_buffer *ring) +{ + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); +} + void i915_gem_load(struct drm_device *dev) { int i; drm_i915_private_t *dev_priv = dev->dev_private; - spin_lock_init(&dev_priv->mm.active_list_lock); + 
INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); - INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); + INIT_LIST_HEAD(&dev_priv->mm.pinned_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); - INIT_LIST_HEAD(&dev_priv->render_ring.active_list); - INIT_LIST_HEAD(&dev_priv->render_ring.request_list); - if (HAS_BSD(dev)) { - INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); - INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); - } + init_ring_lists(&dev_priv->render_ring); + init_ring_lists(&dev_priv->bsd_ring); + init_ring_lists(&dev_priv->blt_ring); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); + init_completion(&dev_priv->error_completion); spin_lock(&shrink_list_lock); list_add(&dev_priv->mm.shrink_list, &shrink_list); spin_unlock(&shrink_list_lock); @@ -4666,21 +4661,30 @@ i915_gem_load(struct drm_device *dev) if (!drm_core_check_feature(dev, DRIVER_MODESET)) dev_priv->fence_reg_start = 3; - if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) dev_priv->num_fence_regs = 16; else dev_priv->num_fence_regs = 8; /* Initialize fence registers to zero */ - if (IS_I965G(dev)) { + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0); + break; + case 5: + case 4: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); - } else { - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); + break; + case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); + case 2: + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); + break; } i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); @@ -4690,8 +4694,8 @@ i915_gem_load(struct drm_device *dev) * Create a physically contiguous memory object for this object * e.g. for cursor + overlay regs */ -int i915_gem_init_phys_object(struct drm_device *dev, - int id, int size, int align) +static int i915_gem_init_phys_object(struct drm_device *dev, + int id, int size, int align) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_phys_object *phys_obj; @@ -4723,7 +4727,7 @@ kfree_obj: return ret; } -void i915_gem_free_phys_object(struct drm_device *dev, int id) +static void i915_gem_free_phys_object(struct drm_device *dev, int id) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_phys_object *phys_obj; @@ -4868,18 +4872,25 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, return 0; } -void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) +void i915_gem_release(struct drm_device *dev, struct drm_file *file) { - struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; + struct drm_i915_file_private *file_priv = file->driver_priv; /* Clean up our request list when the client is going away, so that * later retire_requests won't dereference our soon-to-be-gone * file_priv. 
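 *
 * The requests themselves remain on their ring's request_list and
 * are retired as usual; under the new file_priv->mm.lock we merely
 * sever the client linkage, as the loop below does:
 *
 *	list_del(&request->client_list);
 *	request->file_priv = NULL;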
*/ - mutex_lock(&dev->struct_mutex); - while (!list_empty(&i915_file_priv->mm.request_list)) - list_del_init(i915_file_priv->mm.request_list.next); - mutex_unlock(&dev->struct_mutex); + spin_lock(&file_priv->mm.lock); + while (!list_empty(&file_priv->mm.request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&file_priv->mm.request_list, + struct drm_i915_gem_request, + client_list); + list_del(&request->client_list); + request->file_priv = NULL; + } + spin_unlock(&file_priv->mm.lock); } static int @@ -4888,12 +4899,10 @@ i915_gpu_is_active(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; int lists_empty; - spin_lock(&dev_priv->mm.active_list_lock); lists_empty = list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list); - if (HAS_BSD(dev)) - lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); - spin_unlock(&dev_priv->mm.active_list_lock); + list_empty(&dev_priv->render_ring.active_list) && + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list); return !lists_empty; } @@ -4915,7 +4924,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) if (mutex_trylock(&dev->struct_mutex)) { list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) + mm_list) cnt++; mutex_unlock(&dev->struct_mutex); } @@ -4941,7 +4950,7 @@ rescan: list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, - list) { + mm_list) { if (i915_gem_object_is_purgeable(obj_priv)) { i915_gem_object_unbind(&obj_priv->base); if (--nr_to_scan <= 0) @@ -4970,7 +4979,7 @@ rescan: list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, - list) { + mm_list) { if (nr_to_scan > 0) { i915_gem_object_unbind(&obj_priv->base); nr_to_scan--; diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 80f380b1d951..48644b840a8d 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -30,29 +30,112 @@ #include "i915_drm.h" #include "i915_drv.h" -#if WATCH_INACTIVE -void -i915_verify_inactive(struct drm_device *dev, char *file, int line) +#if WATCH_LISTS +int +i915_verify_lists(struct drm_device *dev) { + static int warned; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; + int err = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - obj = &obj_priv->base; - if (obj_priv->pin_count || obj_priv->active || - (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT))) - DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", + if (warned) + return 0; + + list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed render active %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) { + DRM_ERROR("invalid render active %p (a %d r %x)\n", obj, - obj_priv->pin_count, obj_priv->active, - obj->write_domain, file, line); + obj->active, + obj->base.read_domains); + err++; + } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) { + DRM_ERROR("invalid render active %p (w %x, gwl %d)\n", + obj, + obj->base.write_domain, + !list_empty(&obj->gpu_write_list)); + err++; + } } + + list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) { + if (obj->base.dev != dev || + 
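
[The rewritten i915_gem_release() above drains the per-file request list under the new file_priv->mm.lock, detaching each request's back-pointer rather than freeing it (the request itself is still owned by its ring's request_list). A sketch of that pop-and-detach loop, with the kernel list_head and spinlock modelled by a plain singly linked list:]

#include <stddef.h>

struct request {
	struct request *next;
	void *file_priv;	/* back-pointer being severed */
};

static void release_requests(struct request **head)
{
	while (*head) {				/* while (!list_empty(...)) */
		struct request *req = *head;	/* list_first_entry() */
		*head = req->next;		/* list_del() */
		req->file_priv = NULL;		/* detach; do not free */
	}
}

int main(void)
{
	struct request b = { NULL, &b }, a = { &b, &a };
	struct request *list = &a;

	release_requests(&list);
	return (a.file_priv || b.file_priv) ? 1 : 0;	/* 0 on success */
}
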
!atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed flushing %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 || + list_empty(&obj->gpu_write_list)){ + DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n", + obj, + obj->active, + obj->base.write_domain, + !list_empty(&obj->gpu_write_list)); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed gpu write %p\n", obj); + err++; + break; + } else if (!obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) { + DRM_ERROR("invalid gpu write %p (a %d w %x)\n", + obj, + obj->active, + obj->base.write_domain); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed inactive %p\n", obj); + err++; + break; + } else if (obj->pin_count || obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) { + DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n", + obj, + obj->pin_count, obj->active, + obj->base.write_domain); + err++; + } + } + + list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) { + if (obj->base.dev != dev || + !atomic_read(&obj->base.refcount.refcount)) { + DRM_ERROR("freed pinned %p\n", obj); + err++; + break; + } else if (!obj->pin_count || obj->active || + (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) { + DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n", + obj, + obj->pin_count, obj->active, + obj->base.write_domain); + err++; + } + } + + return warned = err; } #endif /* WATCH_INACTIVE */ -#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE +#if WATCH_EXEC | WATCH_PWRITE static void i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, uint32_t bias, uint32_t mark) @@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, } #endif -#if WATCH_LRU -void -i915_dump_lru(struct drm_device *dev, const char *where) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - - DRM_INFO("active list %s {\n", where); - spin_lock(&dev_priv->mm.active_list_lock); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, - list) - { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - spin_unlock(&dev_priv->mm.active_list_lock); - DRM_INFO("}\n"); - DRM_INFO("flushing list %s {\n", where); - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, - list) - { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - DRM_INFO("}\n"); - DRM_INFO("inactive %s {\n", where); - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - DRM_INFO("}\n"); -} -#endif - - #if WATCH_COHERENCY void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 5c428fa3e0b3..43a4013f53fa 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -31,49 +31,6 @@ #include "i915_drv.h" #include "i915_drm.h" -static struct drm_i915_gem_object * -i915_gem_next_active_object(struct drm_device *dev, - struct list_head **render_iter, - struct list_head **bsd_iter) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = 
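
[i915_verify_lists() above walks each bookkeeping list, checks per-object invariants (live refcount, consistent active/write-domain state), and latches the error count in a static `warned` so a corrupted state is reported only once rather than on every call. A condensed, self-contained sketch of that pattern with an invented object type:]

#include <stdio.h>

struct obj { int refcount, active; };

/* Verify an "inactive" list: every entry must still be alive and must
 * not claim to be active. Returns and latches the error count. */
static int verify_inactive(const struct obj *objs, int n)
{
	static int warned;
	int i, err = 0;

	if (warned)		/* report a broken state only once */
		return 0;

	for (i = 0; i < n; i++) {
		if (objs[i].refcount <= 0) {
			fprintf(stderr, "freed inactive object %d\n", i);
			err++;
		} else if (objs[i].active) {
			fprintf(stderr, "active object %d on inactive list\n", i);
			err++;
		}
	}
	return warned = err;	/* mirrors "return warned = err" above */
}

int main(void)
{
	struct obj objs[] = { { 1, 0 }, { 1, 1 } };
	return verify_inactive(objs, 2) ? 1 : 0;
}
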
NULL; - - if (*render_iter != &dev_priv->render_ring.active_list) - render_obj = list_entry(*render_iter, - struct drm_i915_gem_object, - list); - - if (HAS_BSD(dev)) { - if (*bsd_iter != &dev_priv->bsd_ring.active_list) - bsd_obj = list_entry(*bsd_iter, - struct drm_i915_gem_object, - list); - - if (render_obj == NULL) { - *bsd_iter = (*bsd_iter)->next; - return bsd_obj; - } - - if (bsd_obj == NULL) { - *render_iter = (*render_iter)->next; - return render_obj; - } - - /* XXX can we handle seqno wrapping? */ - if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) { - *render_iter = (*render_iter)->next; - return render_obj; - } else { - *bsd_iter = (*bsd_iter)->next; - return bsd_obj; - } - } else { - *render_iter = (*render_iter)->next; - return render_obj; - } -} - static bool mark_free(struct drm_i915_gem_object *obj_priv, struct list_head *unwind) @@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv, return drm_mm_scan_add_block(obj_priv->gtt_space); } -#define i915_for_each_active_object(OBJ, R, B) \ - *(R) = dev_priv->render_ring.active_list.next; \ - *(B) = dev_priv->bsd_ring.active_list.next; \ - while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL) - int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct drm_i915_gem_object *obj_priv; - struct list_head *render_iter, *bsd_iter; int ret = 0; i915_gem_retire_requests(dev); @@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { if (mark_free(obj_priv, &unwind_list)) goto found; } /* Now merge in the soon-to-be-expired objects... */ - i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { /* Does the object require an outstanding flush? */ if (obj_priv->base.write_domain || obj_priv->pin_count) continue; @@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen } /* Finally add anything with a pending flush (in order of retirement) */ - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { if (obj_priv->pin_count) continue; if (mark_free(obj_priv, &unwind_list)) goto found; } - i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { if (! 
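
[With the per-ring active lists merged into a single mm.active_list kept in retirement order, i915_gem_evict_something() above no longer needs the seqno-merging iterator it deletes; it simply scans candidates cheapest-first: idle objects, then active-but-clean, then flushing, then dirty active. A toy model of that ordering (bucket sizes are illustrative, not the driver's data):]

#include <stdio.h>

enum bucket { IDLE, ACTIVE_CLEAN, FLUSHING, ACTIVE_DIRTY, NBUCKETS };

static int first_fit(const int pages[NBUCKETS], int needed)
{
	int got = 0, b;

	for (b = IDLE; b < NBUCKETS; b++) {
		got += pages[b];
		if (got >= needed)
			return b;	/* enough space by this bucket */
	}
	return -1;			/* -ENOSPC in the driver */
}

int main(void)
{
	int pages[NBUCKETS] = { 4, 8, 16, 32 };

	printf("satisfied at bucket %d\n", first_fit(pages, 10));
	return 0;
}
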
obj_priv->base.write_domain || obj_priv->pin_count) continue; @@ -212,14 +163,11 @@ i915_gem_evict_everything(struct drm_device *dev) int ret; bool lists_empty; - spin_lock(&dev_priv->mm.active_list_lock); lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) - || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); - + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); if (lists_empty) return -ENOSPC; @@ -234,13 +182,11 @@ i915_gem_evict_everything(struct drm_device *dev) if (ret) return ret; - spin_lock(&dev_priv->mm.active_list_lock); lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) - || list_empty(&dev_priv->bsd_ring.active_list))); - spin_unlock(&dev_priv->mm.active_list_lock); + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!lists_empty); return 0; @@ -258,7 +204,7 @@ i915_gem_evict_inactive(struct drm_device *dev) obj = &list_first_entry(&dev_priv->mm.inactive_list, struct drm_i915_gem_object, - list)->base; + mm_list)->base; ret = i915_gem_object_unbind(obj); if (ret != 0) { diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 710eca70b323..af352de70be1 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; - if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { + if (IS_GEN5(dev) || IS_GEN6(dev)) { /* On Ironlake whatever DRAM config, GPU always do * same swizzling setup. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; - } else if (!IS_I9XX(dev)) { + } else if (IS_GEN2(dev)) { /* As far as we know, the 865 doesn't have these bit 6 * swizzling issues. 
*/ @@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) if (tiling_mode == I915_TILING_NONE) return true; - if (!IS_I9XX(dev) || + if (IS_GEN2(dev) || (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) tile_width = 128; else tile_width = 512; /* check maximum stride & object size */ - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { /* i965 stores the end address of the gtt mapping in the fence * reg, so dont bother to check the size */ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; - } else if (IS_GEN3(dev) || IS_GEN2(dev)) { + } else { if (stride > 8192) return false; @@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) } /* 965+ just needs multiples of tile width */ - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { if (stride & (tile_width - 1)) return false; return true; @@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) if (tiling_mode == I915_TILING_NONE) return true; - if (!IS_I965G(dev)) { - if (obj_priv->gtt_offset & (obj->size - 1)) + if (INTEL_INFO(dev)->gen >= 4) + return true; + + if (obj_priv->gtt_offset & (obj->size - 1)) + return false; + + if (IS_GEN3(dev)) { + if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) + return false; + } else { + if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) return false; - if (IS_I9XX(dev)) { - if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) - return false; - } else { - if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) - return false; - } } return true; @@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - int ret = 0; + int ret; + + ret = i915_gem_check_is_wedged(dev); + if (ret) + return ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) @@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) ret = i915_gem_object_unbind(obj); else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - ret = i915_gem_object_put_fence_reg(obj); + ret = i915_gem_object_put_fence_reg(obj, true); else i915_gem_release_mmap(obj); @@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, * bit 17 of its physical address and therefore being interpreted differently * by the GPU. 
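
[The gen4+ branch of i915_tiling_ok() above only requires the stride to be a multiple of the tile width, tested with the usual mask trick; that test is valid because tile_width is 128 or 512, both powers of two. A minimal check of the arithmetic:]

#include <assert.h>
#include <stdbool.h>

static bool stride_ok_gen4(unsigned int stride, unsigned int tile_width)
{
	/* multiple-of-tile_width test; assumes tile_width is a power of 2 */
	return (stride & (tile_width - 1)) == 0;
}

int main(void)
{
	assert(stride_ok_gen4(4096, 512));	/* 8 tiles wide: ok */
	assert(!stride_ok_gen4(4100, 512));	/* misaligned: rejected */
	return 0;
}
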
*/ -static int +static void i915_gem_swizzle_page(struct page *page) { + char temp[64]; char *vaddr; int i; - char temp[64]; vaddr = kmap(page); - if (vaddr == NULL) - return -ENOMEM; for (i = 0; i < PAGE_SIZE; i += 128) { memcpy(temp, &vaddr[i], 64); @@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page) } kunmap(page); - - return 0; } void @@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; if ((new_bit_17 & 0x1) != (test_bit(i, obj_priv->bit_17) != 0)) { - int ret = i915_gem_swizzle_page(obj_priv->pages[i]); - if (ret != 0) { - DRM_ERROR("Failed to swizzle page\n"); - return; - } + i915_gem_swizzle_page(obj_priv->pages[i]); set_page_dirty(obj_priv->pages[i]); } } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b80010f0c4c9..729fd0c91d7b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) } /* For display hotplug interrupt */ -void +static void ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { if ((dev_priv->irq_mask_reg & mask) != 0) { @@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev) else { i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); } @@ -191,12 +191,7 @@ static int i915_pipe_enabled(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; - - if (I915_READ(pipeconf) & PIPEACONF_ENABLE) - return 1; - - return 0; + return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; } /* Called from drm generic code, passed a 'crtc', which @@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long high_frame; unsigned long low_frame; - u32 high1, high2, low, count; - - high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; - low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; + u32 high1, high2, low; if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get vblank count for disabled " @@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) return 0; } + high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; + low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; + /* * High & low register fields aren't synchronized, so make sure * we get a low value that's stable across two reads of the high * register. 
*/ do { - high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> - PIPE_FRAME_HIGH_SHIFT); - low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> - PIPE_FRAME_LOW_SHIFT); - high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> - PIPE_FRAME_HIGH_SHIFT); + high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; + low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; + high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; } while (high1 != high2); - count = (high1 << 8) | low; - - return count; + high1 >>= PIPE_FRAME_HIGH_SHIFT; + low >>= PIPE_FRAME_LOW_SHIFT; + return (high1 << 8) | low; } u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) @@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work) hotplug_work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; + struct intel_encoder *encoder; + + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) + if (encoder->hot_plug) + encoder->hot_plug(encoder); - if (mode_config->num_encoder) { - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - if (intel_encoder->hot_plug) - (*intel_encoder->hot_plug) (intel_encoder); - } - } /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); } @@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev) return; } -irqreturn_t ironlake_irq_handler(struct drm_device *dev) +static void notify_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 seqno = ring->get_seqno(dev, ring); + ring->irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); + wake_up_all(&ring->irq_queue); + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); +} + +static irqreturn_t ironlake_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; u32 de_iir, gt_iir, de_ier, pch_iir; + u32 hotplug_mask; struct drm_i915_master_private *master_priv; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; + + if (IS_GEN6(dev)) + bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT; /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); @@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) goto done; + if (HAS_PCH_CPT(dev)) + hotplug_mask = SDE_HOTPLUG_MASK_CPT; + else + hotplug_mask = SDE_HOTPLUG_MASK; + ret = IRQ_HANDLED; if (dev->primary->master) { @@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_PIPE_NOTIFY) { - u32 seqno = render_ring->get_gem_seqno(dev, render_ring); - render_ring->irq_gem_seqno = seqno; - trace_i915_gem_request_complete(dev, seqno); - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); - } - if (gt_iir & GT_BSD_USER_INTERRUPT) - DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); - + if (gt_iir & GT_PIPE_NOTIFY) + notify_ring(dev, &dev_priv->render_ring); + if (gt_iir & bsd_usr_interrupt) + notify_ring(dev, &dev_priv->bsd_ring); + if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) + 
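
[The vblank-counter rewrite above keeps the raw masked values inside the retry loop and shifts only once a stable pair is seen: the high and low frame-count registers are not latched together, so the high half is read twice and the read retried until it matches. A userspace analogue with a fake volatile register, using the same 8-bit low frame count:]

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t hw_high = 7, hw_low = 200;	/* fake registers */

static uint32_t read_frame_counter(void)
{
	uint32_t high1, high2, low;

	do {
		high1 = hw_high;	/* I915_READ(high_frame) & mask */
		low   = hw_low;		/* I915_READ(low_frame) & mask */
		high2 = hw_high;	/* re-read: did low just wrap? */
	} while (high1 != high2);

	return (high1 << 8) | low;	/* 8-bit low frame count */
}

int main(void)
{
	printf("frame %u\n", read_frame_counter());
	return 0;
}
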
notify_ring(dev, &dev_priv->blt_ring); if (de_iir & DE_GSE) - ironlake_opregion_gse_intr(dev); + intel_opregion_gse_intr(dev); if (de_iir & DE_PLANEA_FLIP_DONE) { intel_prepare_page_flip(dev, 0); - intel_finish_page_flip(dev, 0); + intel_finish_page_flip_plane(dev, 0); } if (de_iir & DE_PLANEB_FLIP_DONE) { intel_prepare_page_flip(dev, 1); - intel_finish_page_flip(dev, 1); + intel_finish_page_flip_plane(dev, 1); } if (de_iir & DE_PIPEA_VBLANK) @@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) drm_handle_vblank(dev, 1); /* check event from PCH */ - if ((de_iir & DE_PCH_EVENT) && - (pch_iir & SDE_HOTPLUG_MASK)) { + if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - } if (de_iir & DE_PCU_EVENT) { I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); @@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work) char *reset_event[] = { "RESET=1", NULL }; char *reset_done_event[] = { "ERROR=0", NULL }; - DRM_DEBUG_DRIVER("generating error event\n"); kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); if (atomic_read(&dev_priv->mm.wedged)) { - if (IS_I965G(dev)) { - DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); - if (!i965_reset(dev, GDRST_RENDER)) { - atomic_set(&dev_priv->mm.wedged, 0); - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); - } - } else { - DRM_DEBUG_DRIVER("reboot required\n"); + DRM_DEBUG_DRIVER("resetting chip\n"); + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); + if (!i915_reset(dev, GRDOM_RENDER)) { + atomic_set(&dev_priv->mm.wedged, 0); + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); } + complete_all(&dev_priv->error_completion); } } +#ifdef CONFIG_DEBUG_FS static struct drm_i915_error_object * i915_error_object_create(struct drm_device *dev, struct drm_gem_object *src) @@ -510,7 +510,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring) if (IS_I830(dev) || IS_845G(dev)) cmd = MI_BATCH_BUFFER; - else if (IS_I965G(dev)) + else if (INTEL_INFO(dev)->gen >= 4) cmd = (MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); else @@ -583,13 +583,16 @@ static void i915_capture_error_state(struct drm_device *dev) return; } - error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring); + DRM_DEBUG_DRIVER("generating error event\n"); + + error->seqno = + dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); error->pipebstat = I915_READ(PIPEBSTAT); error->instpm = I915_READ(INSTPM); - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { error->ipeir = I915_READ(IPEIR); error->ipehr = I915_READ(IPEHR); error->instdone = I915_READ(INSTDONE); @@ -611,9 +614,7 @@ static void i915_capture_error_state(struct drm_device *dev) batchbuffer[0] = NULL; batchbuffer[1] = NULL; count = 0; - list_for_each_entry(obj_priv, - &dev_priv->render_ring.active_list, list) { - + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -630,7 +631,7 @@ static void i915_capture_error_state(struct drm_device *dev) } /* Scan the other lists for completeness for those bizarre errors. 
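
[Both hangcheck re-arms above also gain msecs_to_jiffies(): wrapping DRM_I915_HANGCHECK_PERIOD this way indicates the constant is a millisecond value, and adding it to jiffies raw would make the timeout scale with CONFIG_HZ. An illustrative userspace conversion; the kernel's helper likewise rounds up:]

#include <stdio.h>

#define HZ 250			/* assumed tick rate for the example */

/* round-up ms -> ticks conversion, like msecs_to_jiffies() */
static unsigned long msecs_to_ticks(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;
}

int main(void)
{
	/* a 1500ms hangcheck period at HZ=250 */
	printf("%lu ticks\n", msecs_to_ticks(1500));	/* 375 */
	return 0;
}
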
*/ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -648,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev) } } if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -667,7 +668,7 @@ static void i915_capture_error_state(struct drm_device *dev) } /* We need to copy these to an anonymous buffer as the simplest - * method to avoid being overwritten by userpace. + * method to avoid being overwritten by userspace. */ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); if (batchbuffer[1] != batchbuffer[0]) @@ -689,8 +690,7 @@ static void i915_capture_error_state(struct drm_device *dev) if (error->active_bo) { int i = 0; - list_for_each_entry(obj_priv, - &dev_priv->render_ring.active_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; error->active_bo[i].size = obj->size; @@ -743,6 +743,9 @@ void i915_destroy_error_state(struct drm_device *dev) if (error) i915_error_state_free(dev, error); } +#else +#define i915_capture_error_state(x) +#endif static void i915_report_and_clear_eir(struct drm_device *dev) { @@ -784,7 +787,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) } } - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); printk(KERN_ERR "page table error\n"); @@ -810,7 +813,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR "instruction error\n"); printk(KERN_ERR " INSTPM: 0x%08x\n", I915_READ(INSTPM)); - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { u32 ipeir = I915_READ(IPEIR); printk(KERN_ERR " IPEIR: 0x%08x\n", @@ -875,12 +878,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) i915_report_and_clear_eir(dev); if (wedged) { + INIT_COMPLETION(dev_priv->error_completion); atomic_set(&dev_priv->mm.wedged, 1); /* * Wakeup waiting processes so they don't hang */ - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->render_ring.irq_queue); + if (HAS_BSD(dev)) + wake_up_all(&dev_priv->bsd_ring.irq_queue); + if (HAS_BLT(dev)) + wake_up_all(&dev_priv->blt_ring.irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); @@ -911,7 +919,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ obj_priv = to_intel_bo(work->pending_flip_obj); - if(IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = intel_crtc->plane == 0 ? 
DSPASURF : DSPBSURF; stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; } else { @@ -941,7 +949,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) unsigned long irqflags; int irq_received; int ret = IRQ_NONE; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; atomic_inc(&dev_priv->irq_received); @@ -950,7 +957,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) iir = I915_READ(IIR); - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; else vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; @@ -1018,18 +1025,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) READ_BREADCRUMB(dev_priv); } - if (iir & I915_USER_INTERRUPT) { - u32 seqno = - render_ring->get_gem_seqno(dev, render_ring); - render_ring->irq_gem_seqno = seqno; - trace_i915_gem_request_complete(dev, seqno); - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); - } - + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->render_ring); if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) - DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + notify_ring(dev, &dev_priv->bsd_ring); if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); @@ -1064,7 +1063,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || (iir & I915_ASLE_INTERRUPT)) - opregion_asle_intr(dev); + intel_opregion_asle_intr(dev); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got @@ -1206,18 +1205,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - u32 pipeconf; - pipeconf = I915_READ(pipeconf_reg); - if (!(pipeconf & PIPEACONF_ENABLE)) + if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); - else if (IS_I965G(dev)) + else if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_ENABLE); else @@ -1251,7 +1247,7 @@ void i915_enable_interrupt (struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; if (!HAS_PCH_SPLIT(dev)) - opregion_enable_asle(dev); + intel_opregion_enable_asle(dev); dev_priv->irq_enabled = 1; } @@ -1310,7 +1306,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } -struct drm_i915_gem_request * +static struct drm_i915_gem_request * i915_get_tail_request(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -1330,11 +1326,7 @@ void i915_hangcheck_elapsed(unsigned long data) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t acthd, instdone, instdone1; - /* No reset support on this chip yet. */ - if (IS_GEN6(dev)) - return; - - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { acthd = I915_READ(ACTHD); instdone = I915_READ(INSTDONE); instdone1 = 0; @@ -1346,9 +1338,8 @@ void i915_hangcheck_elapsed(unsigned long data) /* If all work is done then ACTHD clearly hasn't advanced. 
*/ if (list_empty(&dev_priv->render_ring.request_list) || - i915_seqno_passed(i915_get_gem_seqno(dev, - &dev_priv->render_ring), - i915_get_tail_request(dev)->seqno)) { + i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring), + i915_get_tail_request(dev)->seqno)) { bool missed_wakeup = false; dev_priv->hangcheck_count = 0; @@ -1356,13 +1347,19 @@ void i915_hangcheck_elapsed(unsigned long data) /* Issue a wake-up to catch stuck h/w. */ if (dev_priv->render_ring.waiting_gem_seqno && waitqueue_active(&dev_priv->render_ring.irq_queue)) { - DRM_WAKEUP(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->render_ring.irq_queue); missed_wakeup = true; } if (dev_priv->bsd_ring.waiting_gem_seqno && waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { - DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); + wake_up_all(&dev_priv->bsd_ring.irq_queue); + missed_wakeup = true; + } + + if (dev_priv->blt_ring.waiting_gem_seqno && + waitqueue_active(&dev_priv->blt_ring.irq_queue)) { + wake_up_all(&dev_priv->blt_ring.irq_queue); missed_wakeup = true; } @@ -1376,6 +1373,21 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->last_instdone1 == instdone1) { if (dev_priv->hangcheck_count++ > 1) { DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); + + if (!IS_GEN2(dev)) { + /* Is the chip hanging on a WAIT_FOR_EVENT? + * If so we can simply poke the RB_WAIT bit + * and break the hang. This should work on + * all but the second generation chipsets. + */ + u32 tmp = I915_READ(PRB0_CTL); + if (tmp & RING_WAIT) { + I915_WRITE(PRB0_CTL, tmp); + POSTING_READ(PRB0_CTL); + goto out; + } + } + i915_handle_error(dev, true); return; } @@ -1387,8 +1399,10 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->last_instdone1 = instdone1; } +out: /* Reset timer case chip hangs without another request being added */ - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } /* drm_dma.h hooks @@ -1423,8 +1437,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; - u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | - SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + u32 hotplug_mask; dev_priv->irq_mask_reg = ~display_mask; dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; @@ -1435,20 +1448,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); (void) I915_READ(DEIER); - /* Gen6 only needs render pipe_control now */ - if (IS_GEN6(dev)) - render_mask = GT_PIPE_NOTIFY; + if (IS_GEN6(dev)) { + render_mask = + GT_PIPE_NOTIFY | + GT_GEN6_BSD_USER_INTERRUPT | + GT_BLT_USER_INTERRUPT; + } dev_priv->gt_irq_mask_reg = ~render_mask; dev_priv->gt_irq_enable_reg = render_mask; I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); - if (IS_GEN6(dev)) + if (IS_GEN6(dev)) { I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); + I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); + I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); + } + I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); (void) I915_READ(GTIER); + if (HAS_PCH_CPT(dev)) { + hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | + SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ; + } else { + hotplug_mask = 
SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | + SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + } + dev_priv->pch_irq_mask_reg = ~hotplug_mask; dev_priv->pch_irq_enable_reg = hotplug_mask; @@ -1505,9 +1533,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) u32 error_mask; DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); - if (HAS_BSD(dev)) DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); + if (HAS_BLT(dev)) + DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; @@ -1577,7 +1606,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); } - opregion_enable_asle(dev); + intel_opregion_enable_asle(dev); return 0; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4f5e15577e89..25ed911a3112 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -25,52 +25,16 @@ #ifndef _I915_REG_H_ #define _I915_REG_H_ +#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. + * This is all handled in the intel-gtt.ko module. i915.ko only + * cares about the vga bit for the vga rbiter. */ #define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_VGA_DISABLE (1 << 1) -#define INTEL_GMCH_ENABLED 0x4 -#define INTEL_GMCH_MEM_MASK 0x1 -#define INTEL_GMCH_MEM_64M 0x1 -#define INTEL_GMCH_MEM_128M 0 - -#define INTEL_GMCH_GMS_MASK (0xf << 4) -#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4) - -#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) -#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) -#define INTEL_GMCH_GMS_STOLEN_128M (0x8 << 4) -#define INTEL_GMCH_GMS_STOLEN_256M (0x9 << 4) -#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) -#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) -#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) -#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) - -#define SNB_GMCH_CTRL 0x50 -#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 -#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) -#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) -#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) -#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) -#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) -#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) -#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) -#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) -#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) -#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) -#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) -#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) -#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) -#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) -#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) -#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) /* PCI config space */ @@ -106,10 +70,13 @@ #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) #define LBB 0xf4 -#define GDRST 0xc0 -#define GDRST_FULL (0<<2) -#define GDRST_RENDER (1<<2) -#define GDRST_MEDIA (3<<2) + +/* Graphics reset regs */ +#define I965_GDRST 0xc0 /* PCI config register */ +#define ILK_GDSR 0x2ca4 /* MCHBAR offset */ +#define GRDOM_FULL (0<<2) +#define GRDOM_RENDER (1<<2) +#define GRDOM_MEDIA (3<<2) /* VGA stuff */ @@ -192,11 +159,11 @@ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define 
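
[The new _PIPE() helper introduced at the top of the i915_reg.h hunk computes a per-pipe register address by linear interpolation between the pipe A and pipe B bases, and most of the ..._A/..._B pairs below gain a _PIPE-based accessor. Checking the arithmetic against the DPLL pair defined later in this file:]

#include <assert.h>

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))

#define DPLL_A 0x06014
#define DPLL_B 0x06018
#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)

int main(void)
{
	assert(DPLL(0) == 0x06014);	/* base + 0 * stride */
	assert(DPLL(1) == 0x06018);	/* base + 1 * (B - A) */
	return 0;
}
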
MI_STORE_DWORD_INDEX_SHIFT 2 #define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) +#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) - /* * 3D instructions used by the kernel */ @@ -249,6 +216,16 @@ #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ + +/* + * Reset registers + */ +#define DEBUG_RESET_I830 0x6070 +#define DEBUG_RESET_FULL (1<<7) +#define DEBUG_RESET_RENDER (1<<8) +#define DEBUG_RESET_DISPLAY (1<<9) + + /* * Fence registers */ @@ -283,6 +260,17 @@ #define PRB0_HEAD 0x02034 #define PRB0_START 0x02038 #define PRB0_CTL 0x0203c +#define RENDER_RING_BASE 0x02000 +#define BSD_RING_BASE 0x04000 +#define GEN6_BSD_RING_BASE 0x12000 +#define BLT_RING_BASE 0x22000 +#define RING_TAIL(base) ((base)+0x30) +#define RING_HEAD(base) ((base)+0x34) +#define RING_START(base) ((base)+0x38) +#define RING_CTL(base) ((base)+0x3c) +#define RING_HWS_PGA(base) ((base)+0x80) +#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) +#define RING_ACTHD(base) ((base)+0x74) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 @@ -295,6 +283,8 @@ #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 +#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ +#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ #define PRB1_TAIL 0x02040 /* 915+ only */ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ @@ -306,7 +296,6 @@ #define INSTDONE1 0x0207c /* 965+ only */ #define ACTHD_I965 0x02074 #define HWS_PGA 0x02080 -#define HWS_PGA_GEN6 0x04080 #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA 0x2088 /* 965GM+ only */ @@ -464,17 +453,17 @@ #define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25) #define GEN6_BLITTER_SYNC_STATUS (1 << 24) #define GEN6_BLITTER_USER_INTERRUPT (1 << 22) -/* - * BSD (bit stream decoder instruction and interrupt control register defines - * (G4X and Ironlake only) - */ -#define BSD_RING_TAIL 0x04030 -#define BSD_RING_HEAD 0x04034 -#define BSD_RING_START 0x04038 -#define BSD_RING_CTL 0x0403c -#define BSD_RING_ACTHD 0x04074 -#define BSD_HWS_PGA 0x04080 +#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 +#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16) +#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0) +#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 +#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) + +#define GEN6_BSD_IMR 0x120a8 +#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) + +#define GEN6_BSD_RNCID 0x12198 /* * Framebuffer compression (915+ only) @@ -579,12 +568,51 @@ # define GPIO_DATA_VAL_IN (1 << 12) # define GPIO_DATA_PULLUP_DISABLE (1 << 13) -#define GMBUS0 0x5100 -#define GMBUS1 0x5104 -#define GMBUS2 0x5108 -#define GMBUS3 0x510c -#define GMBUS4 0x5110 -#define GMBUS5 0x5120 +#define GMBUS0 0x5100 /* clock/port select */ +#define GMBUS_RATE_100KHZ (0<<8) +#define GMBUS_RATE_50KHZ (1<<8) +#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ +#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ +#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ +#define GMBUS_PORT_DISABLED 0 +#define GMBUS_PORT_SSC 1 +#define GMBUS_PORT_VGADDC 2 +#define GMBUS_PORT_PANEL 3 +#define GMBUS_PORT_DPC 4 /* HDMIC */ +#define 
GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ + /* 6 reserved */ +#define GMBUS_PORT_DPD 7 /* HDMID */ +#define GMBUS_NUM_PORTS 8 +#define GMBUS1 0x5104 /* command/status */ +#define GMBUS_SW_CLR_INT (1<<31) +#define GMBUS_SW_RDY (1<<30) +#define GMBUS_ENT (1<<29) /* enable timeout */ +#define GMBUS_CYCLE_NONE (0<<25) +#define GMBUS_CYCLE_WAIT (1<<25) +#define GMBUS_CYCLE_INDEX (2<<25) +#define GMBUS_CYCLE_STOP (4<<25) +#define GMBUS_BYTE_COUNT_SHIFT 16 +#define GMBUS_SLAVE_INDEX_SHIFT 8 +#define GMBUS_SLAVE_ADDR_SHIFT 1 +#define GMBUS_SLAVE_READ (1<<0) +#define GMBUS_SLAVE_WRITE (0<<0) +#define GMBUS2 0x5108 /* status */ +#define GMBUS_INUSE (1<<15) +#define GMBUS_HW_WAIT_PHASE (1<<14) +#define GMBUS_STALL_TIMEOUT (1<<13) +#define GMBUS_INT (1<<12) +#define GMBUS_HW_RDY (1<<11) +#define GMBUS_SATOER (1<<10) +#define GMBUS_ACTIVE (1<<9) +#define GMBUS3 0x510c /* data buffer bytes 3-0 */ +#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ +#define GMBUS_SLAVE_TIMEOUT_EN (1<<4) +#define GMBUS_NAK_EN (1<<3) +#define GMBUS_IDLE_EN (1<<2) +#define GMBUS_HW_WAIT_EN (1<<1) +#define GMBUS_HW_RDY_EN (1<<0) +#define GMBUS5 0x5120 /* byte index */ +#define GMBUS_2BYTE_INDEX_EN (1<<31) /* * Clock control & power management @@ -603,6 +631,7 @@ #define VGA1_PD_P1_MASK (0x1f << 8) #define DPLL_A 0x06014 #define DPLL_B 0x06018 +#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B) #define DPLL_VCO_ENABLE (1 << 31) #define DPLL_DVO_HIGH_SPEED (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) @@ -633,31 +662,6 @@ #define LVDS 0x61180 #define LVDS_ON (1<<31) -#define ADPA 0x61100 -#define ADPA_DPMS_MASK (~(3<<10)) -#define ADPA_DPMS_ON (0<<10) -#define ADPA_DPMS_SUSPEND (1<<10) -#define ADPA_DPMS_STANDBY (2<<10) -#define ADPA_DPMS_OFF (3<<10) - -#define RING_TAIL 0x00 -#define TAIL_ADDR 0x001FFFF8 -#define RING_HEAD 0x04 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_START 0x08 -#define START_ADDR 0xFFFFF000 -#define RING_LEN 0x0C -#define RING_NR_PAGES 0x001FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 - /* Scratch pad debug 0 reg: */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 @@ -736,10 +740,13 @@ #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 #define DPLL_B_MD 0x06020 /* 965+ only */ +#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD) #define FPA0 0x06040 #define FPA1 0x06044 #define FPB0 0x06048 #define FPB1 0x0604c +#define FP0(pipe) _PIPE(pipe, FPA0, FPB0) +#define FP1(pipe) _PIPE(pipe, FPA1, FPB1) #define FP_N_DIV_MASK 0x003f0000 #define FP_N_PINEVIEW_DIV_MASK 0x00ff0000 #define FP_N_DIV_SHIFT 16 @@ -760,6 +767,7 @@ #define DPLLA_TEST_M_BYPASS (1 << 2) #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) #define D_STATE 0x6104 +#define DSTATE_GFX_RESET_I830 (1<<6) #define DSTATE_PLL_D3_OFF (1<<3) #define DSTATE_GFX_CLOCK_GATING (1<<1) #define DSTATE_DOT_CLOCK_GATING (1<<0) @@ -926,6 +934,8 @@ #define CLKCFG_MEM_800 (3 << 4) #define CLKCFG_MEM_MASK (7 << 4) +#define TSC1 0x11001 +#define TSE (1<<0) #define TR1 0x11006 #define TSFS 0x11020 #define TSFS_SLOPE_MASK 0x0000ff00 @@ -1070,6 +1080,8 @@ #define MEMSTAT_SRC_CTL_STDBY 3 #define RCPREVBSYTUPAVG 0x113b8 #define RCPREVBSYTDNAVG 0x113bc +#define PMMISC 0x11214 +#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ #define SDEW 0x1124c #define 
CSIEW0 0x11250 #define CSIEW1 0x11254 @@ -1150,6 +1162,15 @@ #define PIPEBSRC 0x6101c #define BCLRPAT_B 0x61020 +#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B) +#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B) +#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B) +#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) +#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) +#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) +#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) +#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) + /* VGA port control */ #define ADPA 0x61100 #define ADPA_DAC_ENABLE (1<<31) @@ -1173,6 +1194,7 @@ #define ADPA_DPMS_STANDBY (2<<10) #define ADPA_DPMS_OFF (3<<10) + /* Hotplug control (945+ only) */ #define PORT_HOTPLUG_EN 0x61110 #define HDMIB_HOTPLUG_INT_EN (1 << 29) @@ -1331,6 +1353,22 @@ #define LVDS_B0B3_POWER_DOWN (0 << 2) #define LVDS_B0B3_POWER_UP (3 << 2) +/* Video Data Island Packet control */ +#define VIDEO_DIP_DATA 0x61178 +#define VIDEO_DIP_CTL 0x61170 +#define VIDEO_DIP_ENABLE (1 << 31) +#define VIDEO_DIP_PORT_B (1 << 29) +#define VIDEO_DIP_PORT_C (2 << 29) +#define VIDEO_DIP_ENABLE_AVI (1 << 21) +#define VIDEO_DIP_ENABLE_VENDOR (2 << 21) +#define VIDEO_DIP_ENABLE_SPD (8 << 21) +#define VIDEO_DIP_SELECT_AVI (0 << 19) +#define VIDEO_DIP_SELECT_VENDOR (1 << 19) +#define VIDEO_DIP_SELECT_SPD (3 << 19) +#define VIDEO_DIP_FREQ_ONCE (0 << 16) +#define VIDEO_DIP_FREQ_VSYNC (1 << 16) +#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) + /* Panel power sequencing */ #define PP_STATUS 0x61200 #define PP_ON (1 << 31) @@ -1346,6 +1384,9 @@ #define PP_SEQUENCE_ON (1 << 28) #define PP_SEQUENCE_OFF (2 << 28) #define PP_SEQUENCE_MASK 0x30000000 +#define PP_CYCLE_DELAY_ACTIVE (1 << 27) +#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) +#define PP_SEQUENCE_STATE_MASK 0x0000000f #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) #define PP_ON_DELAYS 0x61208 @@ -1481,6 +1522,7 @@ # define TV_TEST_MODE_MASK (7 << 0) #define TV_DAC 0x68004 +# define TV_DAC_SAVE 0x00ffff00 /** * Reports that DAC state change logic has reported change (RO). 
* @@ -2075,29 +2117,35 @@ /* Display & cursor control */ -/* dithering flag on Ironlake */ -#define PIPE_ENABLE_DITHER (1 << 4) -#define PIPE_DITHER_TYPE_MASK (3 << 2) -#define PIPE_DITHER_TYPE_SPATIAL (0 << 2) -#define PIPE_DITHER_TYPE_ST01 (1 << 2) /* Pipe A */ #define PIPEADSL 0x70000 -#define DSL_LINEMASK 0x00000fff +#define DSL_LINEMASK 0x00000fff #define PIPEACONF 0x70008 -#define PIPEACONF_ENABLE (1<<31) -#define PIPEACONF_DISABLE 0 -#define PIPEACONF_DOUBLE_WIDE (1<<30) +#define PIPECONF_ENABLE (1<<31) +#define PIPECONF_DISABLE 0 +#define PIPECONF_DOUBLE_WIDE (1<<30) #define I965_PIPECONF_ACTIVE (1<<30) -#define PIPEACONF_SINGLE_WIDE 0 -#define PIPEACONF_PIPE_UNLOCKED 0 -#define PIPEACONF_PIPE_LOCKED (1<<25) -#define PIPEACONF_PALETTE 0 -#define PIPEACONF_GAMMA (1<<24) +#define PIPECONF_SINGLE_WIDE 0 +#define PIPECONF_PIPE_UNLOCKED 0 +#define PIPECONF_PIPE_LOCKED (1<<25) +#define PIPECONF_PALETTE 0 +#define PIPECONF_GAMMA (1<<24) #define PIPECONF_FORCE_BORDER (1<<25) #define PIPECONF_PROGRESSIVE (0 << 21) #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) #define PIPECONF_CXSR_DOWNCLOCK (1<<16) +#define PIPECONF_BPP_MASK (0x000000e0) +#define PIPECONF_BPP_8 (0<<5) +#define PIPECONF_BPP_10 (1<<5) +#define PIPECONF_BPP_6 (2<<5) +#define PIPECONF_BPP_12 (3<<5) +#define PIPECONF_DITHER_EN (1<<4) +#define PIPECONF_DITHER_TYPE_MASK (0x0000000c) +#define PIPECONF_DITHER_TYPE_SP (0<<2) +#define PIPECONF_DITHER_TYPE_ST1 (1<<2) +#define PIPECONF_DITHER_TYPE_ST2 (2<<2) +#define PIPECONF_DITHER_TYPE_TEMP (3<<2) #define PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) #define PIPE_CRC_ERROR_ENABLE (1UL<<29) @@ -2128,12 +2176,15 @@ #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) -#define PIPE_BPC_MASK (7 << 5) /* Ironlake */ +#define PIPE_BPC_MASK (7 << 5) /* Ironlake */ #define PIPE_8BPC (0 << 5) #define PIPE_10BPC (1 << 5) #define PIPE_6BPC (2 << 5) #define PIPE_12BPC (3 << 5) +#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) +#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) + #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 @@ -2206,8 +2257,8 @@ #define WM1_LP_SR_EN (1<<31) #define WM1_LP_LATENCY_SHIFT 24 #define WM1_LP_LATENCY_MASK (0x7f<<24) -#define WM1_LP_FBC_LP1_MASK (0xf<<20) -#define WM1_LP_FBC_LP1_SHIFT 20 +#define WM1_LP_FBC_MASK (0xf<<20) +#define WM1_LP_FBC_SHIFT 20 #define WM1_LP_SR_MASK (0x1ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0x3f) @@ -2333,6 +2384,14 @@ #define DSPASURF 0x7019C /* 965+ only */ #define DSPATILEOFF 0x701A4 /* 965+ only */ +#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR) +#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR) +#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE) +#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS) +#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE) +#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF) +#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF) + /* VBIOS flags */ #define SWF00 0x71410 #define SWF01 0x71414 @@ -2397,6 +2456,7 @@ #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 #define FDI_PLL_BIOS_0 0x46000 +#define FDI_PLL_FB_CLOCK_MASK 0xff #define FDI_PLL_BIOS_1 0x46004 #define FDI_PLL_BIOS_2 0x46008 #define DISPLAY_PORT_PLL_BIOS_0 0x4600c @@ -2420,46 +2480,47 @@ #define PIPEA_DATA_M1 0x60030 #define TU_SIZE(x) (((x)-1) << 25) 
/* default size 64 */ #define TU_SIZE_MASK 0x7e000000 -#define PIPEA_DATA_M1_OFFSET 0 +#define PIPE_DATA_M1_OFFSET 0 #define PIPEA_DATA_N1 0x60034 -#define PIPEA_DATA_N1_OFFSET 0 +#define PIPE_DATA_N1_OFFSET 0 #define PIPEA_DATA_M2 0x60038 -#define PIPEA_DATA_M2_OFFSET 0 +#define PIPE_DATA_M2_OFFSET 0 #define PIPEA_DATA_N2 0x6003c -#define PIPEA_DATA_N2_OFFSET 0 +#define PIPE_DATA_N2_OFFSET 0 #define PIPEA_LINK_M1 0x60040 -#define PIPEA_LINK_M1_OFFSET 0 +#define PIPE_LINK_M1_OFFSET 0 #define PIPEA_LINK_N1 0x60044 -#define PIPEA_LINK_N1_OFFSET 0 +#define PIPE_LINK_N1_OFFSET 0 #define PIPEA_LINK_M2 0x60048 -#define PIPEA_LINK_M2_OFFSET 0 +#define PIPE_LINK_M2_OFFSET 0 #define PIPEA_LINK_N2 0x6004c -#define PIPEA_LINK_N2_OFFSET 0 +#define PIPE_LINK_N2_OFFSET 0 /* PIPEB timing regs are same start from 0x61000 */ #define PIPEB_DATA_M1 0x61030 -#define PIPEB_DATA_M1_OFFSET 0 #define PIPEB_DATA_N1 0x61034 -#define PIPEB_DATA_N1_OFFSET 0 #define PIPEB_DATA_M2 0x61038 -#define PIPEB_DATA_M2_OFFSET 0 #define PIPEB_DATA_N2 0x6103c -#define PIPEB_DATA_N2_OFFSET 0 #define PIPEB_LINK_M1 0x61040 -#define PIPEB_LINK_M1_OFFSET 0 #define PIPEB_LINK_N1 0x61044 -#define PIPEB_LINK_N1_OFFSET 0 #define PIPEB_LINK_M2 0x61048 -#define PIPEB_LINK_M2_OFFSET 0 #define PIPEB_LINK_N2 0x6104c -#define PIPEB_LINK_N2_OFFSET 0 + +#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1) +#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1) +#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2) +#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2) +#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1) +#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1) +#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2) +#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2) /* CPU panel fitter */ #define PFA_CTL_1 0x68080 @@ -2516,7 +2577,8 @@ #define GT_SYNC_STATUS (1 << 2) #define GT_USER_INTERRUPT (1 << 0) #define GT_BSD_USER_INTERRUPT (1 << 5) - +#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) +#define GT_BLT_USER_INTERRUPT (1 << 22) #define GTISR 0x44010 #define GTIMR 0x44014 @@ -2551,6 +2613,10 @@ #define SDE_PORTD_HOTPLUG_CPT (1 << 23) #define SDE_PORTC_HOTPLUG_CPT (1 << 22) #define SDE_PORTB_HOTPLUG_CPT (1 << 21) +#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ + SDE_PORTD_HOTPLUG_CPT | \ + SDE_PORTC_HOTPLUG_CPT | \ + SDE_PORTB_HOTPLUG_CPT) #define SDEISR 0xc4000 #define SDEIMR 0xc4004 @@ -2600,11 +2666,14 @@ #define PCH_DPLL_A 0xc6014 #define PCH_DPLL_B 0xc6018 +#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) #define PCH_FPA0 0xc6040 #define PCH_FPA1 0xc6044 #define PCH_FPB0 0xc6048 #define PCH_FPB1 0xc604c +#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0) +#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1) #define PCH_DPLL_TEST 0xc606c @@ -2690,6 +2759,13 @@ #define TRANS_VBLANK_B 0xe1010 #define TRANS_VSYNC_B 0xe1014 +#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B) +#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B) +#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B) +#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B) +#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B) +#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B) + #define TRANSB_DATA_M1 0xe1030 #define TRANSB_DATA_N1 0xe1034 #define TRANSB_DATA_M2 0xe1038 @@ -2701,6 +2777,7 @@ #define TRANSACONF 0xf0008 #define 
TRANSBCONF 0xf1008 +#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF) #define TRANS_DISABLE (0<<31) #define TRANS_ENABLE (1<<31) #define TRANS_STATE_MASK (1<<30) @@ -2721,10 +2798,15 @@ #define FDI_RXA_CHICKEN 0xc200c #define FDI_RXB_CHICKEN 0xc2010 #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) +#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) + +#define SOUTH_DSPCLK_GATE_D 0xc2020 +#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) /* CPU: FDI_TX */ #define FDI_TXA_CTL 0x60100 #define FDI_TXB_CTL 0x61100 +#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL) #define FDI_TX_DISABLE (0<<31) #define FDI_TX_ENABLE (1<<31) #define FDI_LINK_TRAIN_PATTERN_1 (0<<28) @@ -2766,8 +2848,8 @@ /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ #define FDI_RXA_CTL 0xf000c #define FDI_RXB_CTL 0xf100c +#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL) #define FDI_RX_ENABLE (1<<31) -#define FDI_RX_DISABLE (0<<31) /* train, dp width same as FDI_TX */ #define FDI_DP_PORT_WIDTH_X8 (7<<19) #define FDI_8BPC (0<<16) @@ -2782,8 +2864,7 @@ #define FDI_FS_ERR_REPORT_ENABLE (1<<9) #define FDI_FE_ERR_REPORT_ENABLE (1<<8) #define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) -#define FDI_SEL_RAWCLK (0<<4) -#define FDI_SEL_PCDCLK (1<<4) +#define FDI_PCDCLK (1<<4) /* CPT */ #define FDI_AUTO_TRAINING (1<<10) #define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) @@ -2798,6 +2879,9 @@ #define FDI_RXA_TUSIZE2 0xf0038 #define FDI_RXB_TUSIZE1 0xf1030 #define FDI_RXB_TUSIZE2 0xf1038 +#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC) +#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1) +#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2) /* FDI_RX interrupt register format */ #define FDI_RX_INTER_LANE_ALIGN (1<<10) @@ -2816,6 +2900,8 @@ #define FDI_RXA_IMR 0xf0018 #define FDI_RXB_IIR 0xf1014 #define FDI_RXB_IMR 0xf1018 +#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR) +#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR) #define FDI_PLL_CTL_1 0xfe000 #define FDI_PLL_CTL_2 0xfe004 @@ -2935,6 +3021,7 @@ #define TRANS_DP_CTL_A 0xe0300 #define TRANS_DP_CTL_B 0xe1300 #define TRANS_DP_CTL_C 0xe2300 +#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) #define TRANS_DP_OUTPUT_ENABLE (1<<31) #define TRANS_DP_PORT_SEL_B (0<<29) #define TRANS_DP_PORT_SEL_C (1<<29) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 31f08581e93a..989c19d2d959 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveFPA1 = I915_READ(FPA1); dev_priv->saveDPLL_A = I915_READ(DPLL_A); } - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); @@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPASURF = I915_READ(DSPASURF); dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); } @@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveFPB1 = I915_READ(FPB1); dev_priv->saveDPLL_B = I915_READ(DPLL_B); } - if (IS_I965G(dev) && 
!HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); @@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); - if (IS_I965GM(dev) || IS_GM45(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); } @@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); POSTING_READ(dpll_a_reg); udelay(150); - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); POSTING_READ(DPLL_A_MD); } @@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); } @@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); POSTING_READ(dpll_b_reg); udelay(150); - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); POSTING_READ(DPLL_B_MD); } @@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } @@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev) dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); dev_priv->saveCURBPOS = I915_READ(CURBPOS); dev_priv->saveCURBBASE = I915_READ(CURBBASE); - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) dev_priv->saveCURSIZE = I915_READ(CURSIZE); /* CRT state */ @@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev) dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); if (IS_MOBILE(dev) && !IS_I830(dev)) dev_priv->saveLVDS = I915_READ(LVDS); @@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev) I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); /* CRT state */ @@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev) I915_WRITE(ADPA, dev_priv->saveADPA); /* LVDS state */ - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); if (HAS_PCH_SPLIT(dev)) { @@ -878,9 +878,7 @@ int i915_restore_state(struct drm_device *dev) for (i = 0; i < 3; i++) I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); - /* I2C state */ - 
intel_i2c_reset_gmbus(dev); + intel_i2c_reset(dev); return 0; } - diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c new file mode 100644 index 000000000000..65c88f9ba12c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -0,0 +1,286 @@ +/* + * Intel ACPI functions + * + * _DSM related code stolen from nouveau_acpi.c. + */ +#include <linux/pci.h> +#include <linux/acpi.h> +#include <linux/vga_switcheroo.h> +#include <acpi/acpi_drivers.h> + +#include "drmP.h" + +#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ + +#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ +#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ + +static struct intel_dsm_priv { + acpi_handle dhandle; +} intel_dsm_priv; + +static const u8 intel_dsm_guid[] = { + 0xd3, 0x73, 0xd8, 0x7e, + 0xd0, 0xc2, + 0x4f, 0x4e, + 0xa8, 0x54, + 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c +}; + +static int intel_dsm(acpi_handle handle, int func, int arg) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *obj; + u32 result; + int ret = 0; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(intel_dsm_guid); + params[0].buffer.pointer = (char *)intel_dsm_guid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = INTEL_DSM_REVISION_ID; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = func; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = arg; + + ret = acpi_evaluate_object(handle, "_DSM", &input, &output); + if (ret) { + DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); + return ret; + } + + obj = (union acpi_object *)output.pointer; + + result = 0; + switch (obj->type) { + case ACPI_TYPE_INTEGER: + result = obj->integer.value; + break; + + case ACPI_TYPE_BUFFER: + if (obj->buffer.length == 4) { + result = (obj->buffer.pointer[0] | + (obj->buffer.pointer[1] << 8) | + (obj->buffer.pointer[2] << 16) | + (obj->buffer.pointer[3] << 24)); + break; + } + default: + ret = -EINVAL; + break; + } + if (result == 0x80000002) + ret = -ENODEV; + + kfree(output.pointer); + return ret; +} + +static char *intel_dsm_port_name(u8 id) +{ + switch (id) { + case 0: + return "Reserved"; + case 1: + return "Analog VGA"; + case 2: + return "LVDS"; + case 3: + return "Reserved"; + case 4: + return "HDMI/DVI_B"; + case 5: + return "HDMI/DVI_C"; + case 6: + return "HDMI/DVI_D"; + case 7: + return "DisplayPort_A"; + case 8: + return "DisplayPort_B"; + case 9: + return "DisplayPort_C"; + case 0xa: + return "DisplayPort_D"; + case 0xb: + case 0xc: + case 0xd: + return "Reserved"; + case 0xe: + return "WiDi"; + default: + return "bad type"; + } +} + +static char *intel_dsm_mux_type(u8 type) +{ + switch (type) { + case 0: + return "unknown"; + case 1: + return "No MUX, iGPU only"; + case 2: + return "No MUX, dGPU only"; + case 3: + return "MUXed between iGPU and dGPU"; + default: + return "bad type"; + } +} + +static void intel_dsm_platform_mux_info(void) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *pkg; + int i, ret; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(intel_dsm_guid); + params[0].buffer.pointer = (char *)intel_dsm_guid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = INTEL_DSM_REVISION_ID; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = 
INTEL_DSM_FN_PLATFORM_MUX_INFO; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = 0; + + ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, + &output); + if (ret) { + DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); + goto out; + } + + pkg = (union acpi_object *)output.pointer; + + if (pkg->type == ACPI_TYPE_PACKAGE) { + union acpi_object *connector_count = &pkg->package.elements[0]; + DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", + (unsigned long long)connector_count->integer.value); + for (i = 1; i < pkg->package.count; i++) { + union acpi_object *obj = &pkg->package.elements[i]; + union acpi_object *connector_id = + &obj->package.elements[0]; + union acpi_object *info = &obj->package.elements[1]; + DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", + (unsigned long long)connector_id->integer.value); + DRM_DEBUG_DRIVER(" port id: %s\n", + intel_dsm_port_name(info->buffer.pointer[0])); + DRM_DEBUG_DRIVER(" display mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[1])); + DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[2])); + DRM_DEBUG_DRIVER(" hpd mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[3])); + } + } else { + DRM_ERROR("MUX INFO call failed\n"); + } + +out: + kfree(output.pointer); +} + +static int intel_dsm_switchto(enum vga_switcheroo_client_id id) +{ + return 0; +} + +static int intel_dsm_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + return 0; +} + +static int intel_dsm_init(void) +{ + return 0; +} + +static int intel_dsm_get_client_id(struct pci_dev *pdev) +{ + if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + return VGA_SWITCHEROO_IGD; + else + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler intel_dsm_handler = { + .switchto = intel_dsm_switchto, + .power_state = intel_dsm_power_state, + .init = intel_dsm_init, + .get_client_id = intel_dsm_get_client_id, +}; + +static bool intel_dsm_pci_probe(struct pci_dev *pdev) +{ + acpi_handle dhandle, intel_handle; + acpi_status status; + int ret; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "_DSM", &intel_handle); + if (ACPI_FAILURE(status)) { + DRM_DEBUG_KMS("no _DSM method for intel device\n"); + return false; + } + + ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); + if (ret < 0) { + DRM_ERROR("failed to get supported _DSM functions\n"); + return false; + } + + intel_dsm_priv.dhandle = dhandle; + + intel_dsm_platform_mux_info(); + return true; +} + +static bool intel_dsm_detect(void) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; + struct pci_dev *pdev = NULL; + bool has_dsm = false; + int vga_count = 0; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + has_dsm |= intel_dsm_pci_probe(pdev); + } + + if (vga_count == 2 && has_dsm) { + acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", + acpi_method_name); + return true; + } + + return false; +} + +void intel_register_dsm_handler(void) +{ + if (!intel_dsm_detect()) + return; + + vga_switcheroo_register_handler(&intel_dsm_handler); +} + +void intel_unregister_dsm_handler(void) +{ + vga_switcheroo_unregister_handler(); +} diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 
96f75d7f6633..b0b1200ed650 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -24,6 +24,7 @@ * Eric Anholt * */ +#include <drm/drm_dp_helper.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, int i, temp_downclock; struct drm_display_mode *temp_mode; - /* Defaults if we can't find VBT info */ - dev_priv->lvds_dither = 0; - dev_priv->lvds_vbt = 0; - lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); if (!lvds_options) return; @@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, dev_priv->lvds_dither = lvds_options->pixel_dither; if (lvds_options->panel_type == 0xff) return; + panel_type = lvds_options->panel_type; lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); @@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, ((unsigned char *)entry + dvo_timing_offset); panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); + if (!panel_fixed_mode) + return; fill_detail_timing_data(panel_fixed_mode, dvo_timing); @@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct lvds_dvo_timing *dvo_timing; struct drm_display_mode *panel_fixed_mode; - dev_priv->sdvo_lvds_vbt_mode = NULL; - sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); if (!sdvo_lvds_options) return; @@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv, struct drm_device *dev = dev_priv->dev; struct bdb_general_features *general; - /* Set sensible defaults in case we can't find the general block */ - dev_priv->int_tv_support = 1; - dev_priv->int_crt_support = 1; - general = find_section(bdb, BDB_GENERAL_FEATURES); if (general) { dev_priv->int_tv_support = general->int_tv_support; @@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv, dev_priv->lvds_use_ssc = general->enable_ssc; if (dev_priv->lvds_use_ssc) { - if (IS_I85X(dev_priv->dev)) + if (IS_I85X(dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; - else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) + else if (IS_GEN5(dev) || IS_GEN6(dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120; else @@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_general_definitions *general; - const int crt_bus_map_table[] = { - GPIOB, - GPIOA, - GPIOC, - GPIOD, - GPIOE, - GPIOF, - }; general = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (general) { @@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv, if (block_size >= sizeof(*general)) { int bus_pin = general->crt_ddc_gmbus_pin; DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); - if ((bus_pin >= 1) && (bus_pin <= 6)) { - dev_priv->crt_ddc_bus = - crt_bus_map_table[bus_pin-1]; - } + if (bus_pin >= 1 && bus_pin <= 6) + dev_priv->crt_ddc_pin = bus_pin; } else { DRM_DEBUG_KMS("BDB_GD too small (%d). 
Invalid.\n", block_size); @@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv, static void parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, - struct bdb_header *bdb) + struct bdb_header *bdb) { struct sdvo_device_mapping *p_mapping; struct bdb_general_definitions *p_defs; @@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { - DRM_DEBUG_KMS("No general definition block is found\n"); + DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); return; } /* judge whether the size of child device meets the requirements. @@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, p_mapping->slave_addr = p_child->slave_addr; p_mapping->dvo_wiring = p_child->dvo_wiring; p_mapping->ddc_pin = p_child->ddc_pin; + p_mapping->i2c_pin = p_child->i2c_pin; + p_mapping->i2c_speed = p_child->i2c_speed; p_mapping->initialized = 1; + DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n", + p_mapping->dvo_port, + p_mapping->slave_addr, + p_mapping->dvo_wiring, + p_mapping->ddc_pin, + p_mapping->i2c_pin, + p_mapping->i2c_speed); } else { DRM_DEBUG_KMS("Maybe one SDVO port is shared by " "two SDVO device.\n"); @@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv, if (!driver) return; - if (driver && SUPPORTS_EDP(dev) && - driver->lvds_config == BDB_DRIVER_FEATURE_EDP) { - dev_priv->edp_support = 1; - } else { - dev_priv->edp_support = 0; - } + if (SUPPORTS_EDP(dev) && + driver->lvds_config == BDB_DRIVER_FEATURE_EDP) + dev_priv->edp.support = 1; - if (driver && driver->dual_frequency) + if (driver->dual_frequency) dev_priv->render_reclock_avail = true; } @@ -424,27 +414,78 @@ static void parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_edp *edp; + struct edp_power_seq *edp_pps; + struct edp_link_params *edp_link_params; edp = find_section(bdb, BDB_EDP); if (!edp) { - if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { + if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { DRM_DEBUG_KMS("No eDP BDB found but eDP panel " - "supported, assume 18bpp panel color " - "depth.\n"); - dev_priv->edp_bpp = 18; + "supported, assume %dbpp panel color " + "depth.\n", + dev_priv->edp.bpp); } return; } switch ((edp->color_depth >> (panel_type * 2)) & 3) { case EDP_18BPP: - dev_priv->edp_bpp = 18; + dev_priv->edp.bpp = 18; break; case EDP_24BPP: - dev_priv->edp_bpp = 24; + dev_priv->edp.bpp = 24; break; case EDP_30BPP: - dev_priv->edp_bpp = 30; + dev_priv->edp.bpp = 30; + break; + } + + /* Get the eDP sequencing and link info */ + edp_pps = &edp->power_seqs[panel_type]; + edp_link_params = &edp->link_params[panel_type]; + + dev_priv->edp.pps = *edp_pps; + + dev_priv->edp.rate = edp_link_params->rate ? 
DP_LINK_BW_2_7 : + DP_LINK_BW_1_62; + switch (edp_link_params->lanes) { + case 0: + dev_priv->edp.lanes = 1; + break; + case 1: + dev_priv->edp.lanes = 2; + break; + case 3: + default: + dev_priv->edp.lanes = 4; + break; + } + switch (edp_link_params->preemphasis) { + case 0: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; + break; + case 1: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; + break; + case 2: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; + break; + case 3: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; + break; + } + switch (edp_link_params->vswing) { + case 0: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; + break; + case 1: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; + break; + case 2: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; + break; + case 3: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; break; } } @@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { - DRM_DEBUG_KMS("No general definition block is found\n"); + DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } /* judge whether the size of child device meets the requirements. @@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv, } return; } + +static void +init_vbt_defaults(struct drm_i915_private *dev_priv) +{ + dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; + + /* LFP panel data */ + dev_priv->lvds_dither = 1; + dev_priv->lvds_vbt = 0; + + /* SDVO panel data */ + dev_priv->sdvo_lvds_vbt_mode = NULL; + + /* general features */ + dev_priv->int_tv_support = 1; + dev_priv->int_crt_support = 1; + dev_priv->lvds_use_ssc = 0; + + /* eDP data */ + dev_priv->edp.bpp = 18; +} + /** - * intel_init_bios - initialize VBIOS settings & find VBT + * intel_parse_bios - find VBT and initialize settings from the BIOS * @dev: DRM device * * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers * to appropriate values. * - * VBT existence is a sanity check that is relied on by other i830_bios.c code. - * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may - * feed an updated VBT back through that, compared to what we'll fetch using - * this method of groping around in the BIOS data. - * * Returns 0 on success, nonzero on failure. */ bool -intel_init_bios(struct drm_device *dev) +intel_parse_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev->pdev; - struct vbt_header *vbt = NULL; - struct bdb_header *bdb; - u8 __iomem *bios; - size_t size; - int i; + struct bdb_header *bdb = NULL; + u8 __iomem *bios = NULL; - bios = pci_map_rom(pdev, &size); - if (!bios) - return -1; + init_vbt_defaults(dev_priv); - /* Scour memory looking for the VBT signature */ - for (i = 0; i + 4 < size; i++) { - if (!memcmp(bios + i, "$VBT", 4)) { - vbt = (struct vbt_header *)(bios + i); - break; + /* XXX Should this validation be moved to intel_opregion.c? 
*/ + if (dev_priv->opregion.vbt) { + struct vbt_header *vbt = dev_priv->opregion.vbt; + if (memcmp(vbt->signature, "$VBT", 4) == 0) { + DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n", + vbt->signature); + bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); + } else + dev_priv->opregion.vbt = NULL; + } + + if (bdb == NULL) { + struct vbt_header *vbt = NULL; + size_t size; + int i; + + bios = pci_map_rom(pdev, &size); + if (!bios) + return -1; + + /* Scour memory looking for the VBT signature */ + for (i = 0; i + 4 < size; i++) { + if (!memcmp(bios + i, "$VBT", 4)) { + vbt = (struct vbt_header *)(bios + i); + break; + } } - } - if (!vbt) { - DRM_ERROR("VBT signature missing\n"); - pci_unmap_rom(pdev, bios); - return -1; - } + if (!vbt) { + DRM_ERROR("VBT signature missing\n"); + pci_unmap_rom(pdev, bios); + return -1; + } - bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); + bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); + } /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); @@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev) parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); - pci_unmap_rom(pdev, bios); + if (bios) + pci_unmap_rom(pdev, bios); return 0; } + +/* Ensure that vital registers have been initialised, even if the BIOS + * is absent or just failing to do its job. + */ +void intel_setup_bios(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Set the Panel Power On/Off timings if uninitialized. */ + if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { + /* Set T2 to 40ms and T5 to 200ms */ + I915_WRITE(PP_ON_DELAYS, 0x019007d0); + + /* Set T3 to 35ms and Tx to 200ms */ + I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); + } +} diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 4c18514f6f80..5f8e4edcbbb9 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -197,7 +197,8 @@ struct bdb_general_features { struct child_device_config { u16 handle; u16 device_type; - u8 device_id[10]; /* See DEVICE_TYPE_* above */ + u8 i2c_speed; + u8 rsvd[9]; u16 addin_offset; u8 dvo_port; /* See Device_PORT_* above */ u8 i2c_pin; @@ -466,7 +467,8 @@ struct bdb_edp { struct edp_link_params link_params[16]; } __attribute__ ((packed)); -bool intel_init_bios(struct drm_device *dev); +void intel_setup_bios(struct drm_device *dev); +bool intel_parse_bios(struct drm_device *dev); /* * Driver<->VBIOS interaction occurs through scratch bits in diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 197d4f32585a..c55c77043357 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector, if (mode->clock < 25000) return MODE_CLOCK_LOW; - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) max_clock = 350000; else max_clock = 400000; @@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, * Disable separate mode multiplier used when cloning SDVO to CRT * XXX this needs to be adjusted when we really are cloning */ - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { dpll_md = I915_READ(dpll_md_reg); I915_WRITE(dpll_md_reg, dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); @@ -187,11 +187,12 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(PCH_ADPA, adpa); if (wait_for((I915_READ(PCH_ADPA) & 
ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, - 1000, 1)) + 1000)) DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { - I915_WRITE(PCH_ADPA, temp); + /* Make sure hotplug is enabled */ + I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE); (void)I915_READ(PCH_ADPA); } @@ -244,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) /* wait for FORCE_DETECT to go off */ if (wait_for((I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT) == 0, - 1000, 1)) + 1000)) DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); } @@ -261,21 +262,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) return ret; } +static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus) +{ + u8 buf; + struct i2c_msg msgs[] = { + { + .addr = 0xA0, + .flags = 0, + .len = 1, + .buf = &buf, + }, + }; + /* DDC monitor detect: Does it ACK a write to 0xA0? */ + return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; +} + static bool intel_crt_detect_ddc(struct drm_encoder *encoder) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + struct drm_i915_private *dev_priv = encoder->dev->dev_private; /* CRT should always be at 0, but check anyway */ if (intel_encoder->type != INTEL_OUTPUT_ANALOG) return false; - return intel_ddc_probe(intel_encoder); + if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) { + DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n"); + return true; + } + + if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) { + DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); + return true; + } + + return false; } static enum drm_connector_status intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) { - struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -295,6 +322,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder uint8_t st00; enum drm_connector_status status; + DRM_DEBUG_KMS("starting load-detect on CRT\n"); + if (pipe == 0) { bclrpat_reg = BCLRPAT_A; vtotal_reg = VTOTAL_A; @@ -324,9 +353,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder /* Set the border color to purple. 
*/ I915_WRITE(bclrpat_reg, 0x500050); - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { uint32_t pipeconf = I915_READ(pipeconf_reg); I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); + POSTING_READ(pipeconf_reg); /* Wait for next Vblank to substitue * border color for Color info */ intel_wait_for_vblank(dev, pipe); @@ -404,34 +434,37 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_encoder *encoder = intel_attached_encoder(connector); struct drm_crtc *crtc; int dpms_mode; enum drm_connector_status status; - if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { - if (intel_crt_detect_hotplug(connector)) + if (I915_HAS_HOTPLUG(dev)) { + if (intel_crt_detect_hotplug(connector)) { + DRM_DEBUG_KMS("CRT detected via hotplug\n"); return connector_status_connected; - else + } else return connector_status_disconnected; } - if (intel_crt_detect_ddc(encoder)) + if (intel_crt_detect_ddc(&encoder->base)) return connector_status_connected; if (!force) return connector->status; /* for pre-945g platforms use load detect */ - if (encoder->crtc && encoder->crtc->enabled) { - status = intel_crt_load_detect(encoder->crtc, intel_encoder); + if (encoder->base.crtc && encoder->base.crtc->enabled) { + status = intel_crt_load_detect(encoder->base.crtc, encoder); } else { - crtc = intel_get_load_detect_pipe(intel_encoder, connector, + crtc = intel_get_load_detect_pipe(encoder, connector, NULL, &dpms_mode); if (crtc) { - status = intel_crt_load_detect(crtc, intel_encoder); - intel_release_load_detect_pipe(intel_encoder, + if (intel_crt_detect_ddc(&encoder->base)) + status = connector_status_connected; + else + status = intel_crt_load_detect(crtc, encoder); + intel_release_load_detect_pipe(encoder, connector, dpms_mode); } else status = connector_status_unknown; @@ -449,32 +482,18 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { - int ret; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct i2c_adapter *ddc_bus; struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; - - ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); + ret = intel_ddc_get_modes(connector, + &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); if (ret || !IS_G4X(dev)) - goto end; + return ret; /* Try to probe digital port for output in DVI-I -> VGA mode. 
*/ - ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); - - if (!ddc_bus) { - dev_printk(KERN_ERR, &connector->dev->pdev->dev, - "DDC bus registration failed for CRTDDC_D.\n"); - goto end; - } - /* Try to get modes by GPIOD port */ - ret = intel_ddc_get_modes(connector, ddc_bus); - intel_i2c_destroy(ddc_bus); - -end: - return ret; - + return intel_ddc_get_modes(connector, + &dev_priv->gmbus[GMBUS_PORT_DPB].adapter); } static int intel_crt_set_property(struct drm_connector *connector, @@ -507,7 +526,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { .mode_valid = intel_crt_mode_valid, .get_modes = intel_crt_get_modes, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_crt_enc_funcs = { @@ -520,7 +539,6 @@ void intel_crt_init(struct drm_device *dev) struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; - u32 i2c_reg; intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); if (!intel_encoder) @@ -536,27 +554,10 @@ void intel_crt_init(struct drm_device *dev) drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); - - /* Set up the DDC bus. */ - if (HAS_PCH_SPLIT(dev)) - i2c_reg = PCH_GPIOA; - else { - i2c_reg = GPIOA; - /* Use VBT information for CRT DDC if available */ - if (dev_priv->crt_ddc_bus != 0) - i2c_reg = dev_priv->crt_ddc_bus; - } - intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); - if (!intel_encoder->ddc_bus) { - dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " - "failed.\n"); - return; - } + intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_ANALOG; intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | @@ -566,7 +567,7 @@ void intel_crt_init(struct drm_device *dev) connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 979228594599..990f065374b2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -43,8 +43,8 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type); static void intel_update_watermarks(struct drm_device *dev); -static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); -static void intel_crtc_update_cursor(struct drm_crtc *crtc); +static void intel_increase_pllclock(struct drm_crtc *crtc); +static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); typedef struct { /* given values */ @@ -342,6 +342,16 @@ static bool intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); +static inline u32 /* units of 100MHz */ +intel_fdi_link_freq(struct drm_device *dev) +{ + if (IS_GEN5(dev)) { + struct drm_i915_private *dev_priv = 
dev->dev_private; + return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; + } else + return 27; +} + static const intel_limit_t intel_limits_i8xx_dvo = { .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, @@ -701,16 +711,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) limit = intel_ironlake_limit(crtc); else if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); - } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) { - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits_i9xx_lvds; - else - limit = &intel_limits_i9xx_sdvo; } else if (IS_PINEVIEW(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_pineview_lvds; else limit = &intel_limits_pineview_sdvo; + } else if (!IS_GEN2(dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) + limit = &intel_limits_i9xx_lvds; + else + limit = &intel_limits_i9xx_sdvo; } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i8xx_lvds; @@ -744,20 +754,17 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock /** * Returns whether any output on the specified pipe is of the specified type */ -bool intel_pipe_has_type (struct drm_crtc *crtc, int type) +bool intel_pipe_has_type(struct drm_crtc *crtc, int type) { - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *l_entry; + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; - list_for_each_entry(l_entry, &mode_config->encoder_list, head) { - if (l_entry && l_entry->crtc == crtc) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry); - if (intel_encoder->type == type) - return true; - } - } - return false; + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) + if (encoder->base.crtc == crtc && encoder->type == type) + return true; + + return false; } #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) @@ -928,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; intel_clock_t clock; - /* return directly when it is eDP */ - if (HAS_eDP) - return true; - if (target < 200000) { clock.n = 1; clock.p1 = 2; @@ -955,26 +958,26 @@ static bool intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock) { - intel_clock_t clock; - if (target < 200000) { - clock.p1 = 2; - clock.p2 = 10; - clock.n = 2; - clock.m1 = 23; - clock.m2 = 8; - } else { - clock.p1 = 1; - clock.p2 = 10; - clock.n = 1; - clock.m1 = 14; - clock.m2 = 2; - } - clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); - clock.p = (clock.p1 * clock.p2); - clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; - clock.vco = 0; - memcpy(best_clock, &clock, sizeof(intel_clock_t)); - return true; + intel_clock_t clock; + if (target < 200000) { + clock.p1 = 2; + clock.p2 = 10; + clock.n = 2; + clock.m1 = 23; + clock.m2 = 8; + } else { + clock.p1 = 1; + clock.p2 = 10; + clock.n = 1; + clock.m1 = 14; + clock.m2 = 2; + } + clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); + clock.p = (clock.p1 * clock.p2); + clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; + clock.vco = 0; + memcpy(best_clock, &clock, sizeof(intel_clock_t)); + return true; } /** @@ -1007,9 +1010,9 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) I915_READ(pipestat_reg) | 
PIPE_VBLANK_INTERRUPT_STATUS); /* Wait for vblank interrupt bit to set */ - if (wait_for((I915_READ(pipestat_reg) & - PIPE_VBLANK_INTERRUPT_STATUS), - 50, 0)) + if (wait_for(I915_READ(pipestat_reg) & + PIPE_VBLANK_INTERRUPT_STATUS, + 50)) DRM_DEBUG_KMS("vblank wait timed out\n"); } @@ -1028,36 +1031,35 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) * Otherwise: * wait for the display line value to settle (it usually * ends up stopping at the start of the next frame). - * + * */ -static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) +void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 4) { - int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); + int reg = PIPECONF(pipe); /* Wait for the Pipe State to go off */ - if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, - 100, 0)) + if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, + 100)) DRM_DEBUG_KMS("pipe_off wait timed out\n"); } else { u32 last_line; - int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); + int reg = PIPEDSL(pipe); unsigned long timeout = jiffies + msecs_to_jiffies(100); /* Wait for the display line to settle */ do { - last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; + last_line = I915_READ(reg) & DSL_LINEMASK; mdelay(5); - } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && + } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && time_after(timeout, jiffies)); if (time_after(jiffies, timeout)) DRM_DEBUG_KMS("pipe_off wait timed out\n"); } } -/* Parameters have changed, update FBC info */ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct drm_device *dev = crtc->dev; @@ -1069,6 +1071,14 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) int plane, i; u32 fbc_ctl, fbc_ctl2; + if (fb->pitch == dev_priv->cfb_pitch && + obj_priv->fence_reg == dev_priv->cfb_fence && + intel_crtc->plane == dev_priv->cfb_plane && + I915_READ(FBC_CONTROL) & FBC_CTL_EN) + return; + + i8xx_disable_fbc(dev); + dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; if (fb->pitch < dev_priv->cfb_pitch) @@ -1102,7 +1112,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) I915_WRITE(FBC_CONTROL, fbc_ctl); DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ", - dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); + dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); } void i8xx_disable_fbc(struct drm_device *dev) @@ -1110,19 +1120,16 @@ void i8xx_disable_fbc(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 fbc_ctl; - if (!I915_HAS_FBC(dev)) - return; - - if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN)) - return; /* Already off, just return */ - /* Disable compression */ fbc_ctl = I915_READ(FBC_CONTROL); + if ((fbc_ctl & FBC_CTL_EN) == 0) + return; + fbc_ctl &= ~FBC_CTL_EN; I915_WRITE(FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) { + if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { DRM_DEBUG_KMS("FBC idle timed out\n"); return; } @@ -1145,14 +1152,27 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = (intel_crtc->plane == 0 ? 
DPFC_CTL_PLANEA : - DPFC_CTL_PLANEB); + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; u32 dpfc_ctl; + dpfc_ctl = I915_READ(DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && + dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_plane == intel_crtc->plane && + dev_priv->cfb_y == crtc->y) + return; + + I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); + POSTING_READ(DPFC_CONTROL); + intel_wait_for_vblank(dev, intel_crtc->pipe); + } + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; dev_priv->cfb_fence = obj_priv->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; + dev_priv->cfb_y = crtc->y; dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; if (obj_priv->tiling_mode != I915_TILING_NONE) { @@ -1162,7 +1182,6 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY); } - I915_WRITE(DPFC_CONTROL, dpfc_ctl); I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); @@ -1181,10 +1200,12 @@ void g4x_disable_fbc(struct drm_device *dev) /* Disable compression */ dpfc_ctl = I915_READ(DPFC_CONTROL); - dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(DPFC_CONTROL, dpfc_ctl); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(DPFC_CONTROL, dpfc_ctl); - DRM_DEBUG_KMS("disabled FBC\n"); + DRM_DEBUG_KMS("disabled FBC\n"); + } } static bool g4x_fbc_enabled(struct drm_device *dev) @@ -1202,16 +1223,30 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : - DPFC_CTL_PLANEB; + int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; u32 dpfc_ctl; + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && + dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_plane == intel_crtc->plane && + dev_priv->cfb_offset == obj_priv->gtt_offset && + dev_priv->cfb_y == crtc->y) + return; + + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN); + POSTING_READ(ILK_DPFC_CONTROL); + intel_wait_for_vblank(dev, intel_crtc->pipe); + } + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; dev_priv->cfb_fence = obj_priv->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; + dev_priv->cfb_offset = obj_priv->gtt_offset; + dev_priv->cfb_y = crtc->y; - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); dpfc_ctl &= DPFC_RESERVED; dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); if (obj_priv->tiling_mode != I915_TILING_NONE) { @@ -1221,15 +1256,13 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); } - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); /* enable it... 
*/ - I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | - DPFC_CTL_EN); + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); } @@ -1241,10 +1274,12 @@ void ironlake_disable_fbc(struct drm_device *dev) /* Disable compression */ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); - dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); - DRM_DEBUG_KMS("disabled FBC\n"); + DRM_DEBUG_KMS("disabled FBC\n"); + } } static bool ironlake_fbc_enabled(struct drm_device *dev) @@ -1286,8 +1321,7 @@ void intel_disable_fbc(struct drm_device *dev) /** * intel_update_fbc - enable/disable FBC as needed - * @crtc: CRTC to point the compressor at - * @mode: mode in use + * @dev: the drm_device * * Set up the framebuffer compression hardware at mode set time. We * enable it if possible: @@ -1304,18 +1338,14 @@ void intel_disable_fbc(struct drm_device *dev) * * We need to enable/disable FBC on a global basis. */ -static void intel_update_fbc(struct drm_crtc *crtc, - struct drm_display_mode *mode) +static void intel_update_fbc(struct drm_device *dev) { - struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_framebuffer *fb = crtc->fb; + struct drm_crtc *crtc = NULL, *tmp_crtc; + struct intel_crtc *intel_crtc; + struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj_priv; - struct drm_crtc *tmp_crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int plane = intel_crtc->plane; - int crtcs_enabled = 0; DRM_DEBUG_KMS("\n"); @@ -1325,12 +1355,6 @@ static void intel_update_fbc(struct drm_crtc *crtc, if (!I915_HAS_FBC(dev)) return; - if (!crtc->fb) - return; - - intel_fb = to_intel_framebuffer(fb); - obj_priv = to_intel_bo(intel_fb->obj); - /* * If FBC is already on, we just have to verify that we can * keep it that way... @@ -1341,35 +1365,47 @@ static void intel_update_fbc(struct drm_crtc *crtc, * - going to an unsupported config (interlace, pixel multiply, etc.) 
*/ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { - if (tmp_crtc->enabled) - crtcs_enabled++; + if (tmp_crtc->enabled) { + if (crtc) { + DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + goto out_disable; + } + crtc = tmp_crtc; + } } - DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled); - if (crtcs_enabled > 1) { - DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + + if (!crtc || crtc->fb == NULL) { + DRM_DEBUG_KMS("no output, disabling\n"); + dev_priv->no_fbc_reason = FBC_NO_OUTPUT; goto out_disable; } + + intel_crtc = to_intel_crtc(crtc); + fb = crtc->fb; + intel_fb = to_intel_framebuffer(fb); + obj_priv = to_intel_bo(intel_fb->obj); + if (intel_fb->obj->size > dev_priv->cfb_size) { DRM_DEBUG_KMS("framebuffer too large, disabling " - "compression\n"); + "compression\n"); dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; goto out_disable; } - if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || - (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { + if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || + (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { DRM_DEBUG_KMS("mode incompatible with compression, " - "disabling\n"); + "disabling\n"); dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; goto out_disable; } - if ((mode->hdisplay > 2048) || - (mode->vdisplay > 1536)) { + if ((crtc->mode.hdisplay > 2048) || + (crtc->mode.vdisplay > 1536)) { DRM_DEBUG_KMS("mode too large for compression, disabling\n"); dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; goto out_disable; } - if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { + if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { DRM_DEBUG_KMS("plane not 0, disabling compression\n"); dev_priv->no_fbc_reason = FBC_BAD_PLANE; goto out_disable; @@ -1384,18 +1420,7 @@ static void intel_update_fbc(struct drm_crtc *crtc, if (in_dbg_master()) goto out_disable; - if (intel_fbc_enabled(dev)) { - /* We can re-enable it in this case, but need to update pitch */ - if ((fb->pitch > dev_priv->cfb_pitch) || - (obj_priv->fence_reg != dev_priv->cfb_fence) || - (plane != dev_priv->cfb_plane)) - intel_disable_fbc(dev); - } - - /* Now try to turn it back on if possible */ - if (!intel_fbc_enabled(dev)) - intel_enable_fbc(crtc, 500); - + intel_enable_fbc(crtc, 500); return; out_disable: @@ -1407,7 +1432,9 @@ out_disable: } int -intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) +intel_pin_and_fence_fb_obj(struct drm_device *dev, + struct drm_gem_object *obj, + bool pipelined) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 alignment; @@ -1417,7 +1444,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) case I915_TILING_NONE: if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) alignment = 128 * 1024; - else if (IS_I965G(dev)) + else if (INTEL_INFO(dev)->gen >= 4) alignment = 4 * 1024; else alignment = 64 * 1024; @@ -1435,9 +1462,13 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) } ret = i915_gem_object_pin(obj, alignment); - if (ret != 0) + if (ret) return ret; + ret = i915_gem_object_set_to_display_plane(obj, pipelined); + if (ret) + goto err_unpin; + /* Install a fence for tiled scan-out. Pre-i965 always needs a * fence, whereas 965+ only requires a fence if using * framebuffer compression. 
For simplicity, we always install @@ -1445,20 +1476,22 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) */ if (obj_priv->fence_reg == I915_FENCE_REG_NONE && obj_priv->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; - } + ret = i915_gem_object_get_fence_reg(obj, false); + if (ret) + goto err_unpin; } return 0; + +err_unpin: + i915_gem_object_unpin(obj); + return ret; } /* Assume fb object is pinned & idle & fenced and just update base pointers */ static int intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, - int x, int y) + int x, int y, enum mode_set_atomic state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1468,12 +1501,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_gem_object *obj; int plane = intel_crtc->plane; unsigned long Start, Offset; - int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); - int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); - int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; - int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; u32 dspcntr; + u32 reg; switch (plane) { case 0: @@ -1488,7 +1517,8 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, obj = intel_fb->obj; obj_priv = to_intel_bo(obj); - dspcntr = I915_READ(dspcntr_reg); + reg = DSPCNTR(plane); + dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (fb->bits_per_pixel) { @@ -1509,7 +1539,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, DRM_ERROR("Unknown color depth\n"); return -EINVAL; } - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { if (obj_priv->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; else @@ -1520,28 +1550,24 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* must disable */ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - I915_WRITE(dspcntr_reg, dspcntr); + I915_WRITE(reg, dspcntr); Start = obj_priv->gtt_offset; Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", Start, Offset, x, y, fb->pitch); - I915_WRITE(dspstride, fb->pitch); - if (IS_I965G(dev)) { - I915_WRITE(dspsurf, Start); - I915_WRITE(dsptileoff, (y << 16) | x); - I915_WRITE(dspbase, Offset); - } else { - I915_WRITE(dspbase, Start + Offset); - } - POSTING_READ(dspbase); + I915_WRITE(DSPSTRIDE(plane), fb->pitch); + if (INTEL_INFO(dev)->gen >= 4) { + I915_WRITE(DSPSURF(plane), Start); + I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); + I915_WRITE(DSPADDR(plane), Offset); + } else + I915_WRITE(DSPADDR(plane), Start + Offset); + POSTING_READ(reg); - if (IS_I965G(dev) || plane == 0) - intel_update_fbc(crtc, &crtc->mode); - - intel_wait_for_vblank(dev, intel_crtc->pipe); - intel_increase_pllclock(crtc, true); + intel_update_fbc(dev); + intel_increase_pllclock(crtc); return 0; } @@ -1553,11 +1579,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_device *dev = crtc->dev; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; int ret; /* no fb bound */ @@ 
-1566,46 +1587,43 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } - switch (plane) { + switch (intel_crtc->plane) { case 0: case 1: break; default: - DRM_ERROR("Can't update plane %d in SAREA\n", plane); return -EINVAL; } - intel_fb = to_intel_framebuffer(crtc->fb); - obj = intel_fb->obj; - obj_priv = to_intel_bo(obj); - mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj); + ret = intel_pin_and_fence_fb_obj(dev, + to_intel_framebuffer(crtc->fb)->obj, + false); if (ret != 0) { mutex_unlock(&dev->struct_mutex); return ret; } - ret = i915_gem_object_set_to_display_plane(obj); - if (ret != 0) { - i915_gem_object_unpin(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } - - ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y); - if (ret) { - i915_gem_object_unpin(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } - if (old_fb) { - intel_fb = to_intel_framebuffer(old_fb); - obj_priv = to_intel_bo(intel_fb->obj); - i915_gem_object_unpin(intel_fb->obj); + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + wait_event(dev_priv->pending_flip_queue, + atomic_read(&obj_priv->pending_flip) == 0); } + ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, + LEAVE_ATOMIC_MODE_SET); + if (ret) { + i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + + if (old_fb) + i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); + mutex_unlock(&dev->struct_mutex); if (!dev->primary->master) @@ -1615,7 +1633,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (!master_priv->sarea_priv) return 0; - if (pipe) { + if (intel_crtc->pipe) { master_priv->sarea_priv->pipeB_x = x; master_priv->sarea_priv->pipeB_y = y; } else { @@ -1626,7 +1644,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } -static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) +static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1659,6 +1677,7 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock) } I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); udelay(500); } @@ -1669,84 +1688,109 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; - int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; - u32 temp, tries = 0; + u32 reg, temp, tries; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ - temp = I915_READ(fdi_rx_imr_reg); + reg = FDI_RX_IMR(pipe); + temp = I915_READ(reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); + I915_WRITE(reg, temp); + I915_READ(reg); udelay(150); /* enable CPU FDI TX and PCH FDI RX */ - temp = I915_READ(fdi_tx_reg); - temp |= FDI_TX_ENABLE; + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~(7 << 19); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); - I915_READ(fdi_tx_reg); + I915_WRITE(reg, temp | FDI_TX_ENABLE); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); + I915_WRITE(reg, temp | FDI_RX_ENABLE); + + POSTING_READ(reg); udelay(150); + /* Ironlake workaround, enable clock pointer after FDI enable*/ + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE); + + reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { - temp = I915_READ(fdi_rx_iir_reg); + temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if ((temp & FDI_RX_BIT_LOCK)) { DRM_DEBUG_KMS("FDI train 1 done.\n"); - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_BIT_LOCK); + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); break; } } if (tries == 5) - DRM_DEBUG_KMS("FDI train 1 fail!\n"); + DRM_ERROR("FDI train 1 fail!\n"); /* Train 2 */ - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_tx_reg, temp); + I915_WRITE(reg, temp); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_rx_reg, temp); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(150); - tries = 0; - + reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { - temp = I915_READ(fdi_rx_iir_reg); + temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_SYMBOL_LOCK); + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } if (tries == 5) - DRM_DEBUG_KMS("FDI train 2 fail!\n"); + DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done\n"); + + /* enable normal train */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_NORMAL_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE; + } + I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + + /* wait one idle pattern time */ + POSTING_READ(reg); + udelay(1000); } -static int snb_b_fdi_train_param [] = { +static const int snb_b_fdi_train_param [] = { FDI_LINK_TRAIN_400MV_0DB_SNB_B, FDI_LINK_TRAIN_400MV_6DB_SNB_B, FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, @@ -1760,24 +1804,22 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR; - int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; - u32 temp, i; + u32 reg, temp, i; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ - temp = I915_READ(fdi_rx_imr_reg); + reg = FDI_RX_IMR(pipe); + temp = I915_READ(reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(150); /* enable CPU FDI TX and PCH FDI RX */ - temp = I915_READ(fdi_tx_reg); - temp |= FDI_TX_ENABLE; + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~(7 << 19); temp |= (intel_crtc->fdi_lanes - 1) << 19; temp &= ~FDI_LINK_TRAIN_NONE; @@ -1785,10 +1827,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; - I915_WRITE(fdi_tx_reg, temp); - I915_READ(fdi_tx_reg); + I915_WRITE(reg, temp | FDI_TX_ENABLE); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; @@ -1796,32 +1838,37 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); + I915_WRITE(reg, temp | FDI_RX_ENABLE); + + POSTING_READ(reg); udelay(150); for (i = 0; i < 4; i++ ) { - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; - I915_WRITE(fdi_tx_reg, temp); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(500); - temp = I915_READ(fdi_rx_iir_reg); + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_BIT_LOCK) { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_BIT_LOCK); + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); DRM_DEBUG_KMS("FDI train 1 done.\n"); break; } } if (i == 4) - DRM_DEBUG_KMS("FDI train 1 fail!\n"); + DRM_ERROR("FDI train 1 fail!\n"); /* Train 2 */ - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; if (IS_GEN6(dev)) { @@ -1829,9 +1876,10 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; } - I915_WRITE(fdi_tx_reg, temp); + I915_WRITE(reg, temp); - temp = I915_READ(fdi_rx_reg); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); if (HAS_PCH_CPT(dev)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; @@ -1839,64 +1887,419 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; } - I915_WRITE(fdi_rx_reg, temp); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(150); for (i = 0; i < 4; i++ ) { - temp = I915_READ(fdi_tx_reg); + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; - I915_WRITE(fdi_tx_reg, temp); + I915_WRITE(reg, temp); + + POSTING_READ(reg); udelay(500); - temp = I915_READ(fdi_rx_iir_reg); + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); if (temp & 
FDI_RX_SYMBOL_LOCK) { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_SYMBOL_LOCK); + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } } if (i == 4) - DRM_DEBUG_KMS("FDI train 2 fail!\n"); + DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done.\n"); } -static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) +static void ironlake_fdi_enable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp; + + /* Write the TU size bits so error detection works */ + I915_WRITE(FDI_RX_TUSIZE1(pipe), + I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); + + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~((0x7 << 19) | (0x7 << 16)); + temp |= (intel_crtc->fdi_lanes - 1) << 19; + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); + + POSTING_READ(reg); + udelay(200); + + /* Switch from Rawclk to PCDclk */ + temp = I915_READ(reg); + I915_WRITE(reg, temp | FDI_PCDCLK); + + POSTING_READ(reg); + udelay(200); + + /* Enable CPU FDI TX PLL, always on for Ironlake */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); + + POSTING_READ(reg); + udelay(100); + } +} + +static void intel_flush_display_plane(struct drm_device *dev, + int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg = DSPADDR(plane); + I915_WRITE(reg, I915_READ(reg)); +} + +/* + * When we disable a pipe, we need to clear any pending scanline wait events + * to avoid hanging the ring, which we assume we are waiting on. + */ +static void intel_clear_scanline_wait(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp; + + if (IS_GEN2(dev)) + /* Can't break the hang on i8xx */ + return; + + tmp = I915_READ(PRB0_CTL); + if (tmp & RING_WAIT) { + I915_WRITE(PRB0_CTL, tmp); + POSTING_READ(PRB0_CTL); + } +} + +static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) +{ + struct drm_i915_gem_object *obj_priv; + struct drm_i915_private *dev_priv; + + if (crtc->fb == NULL) + return; + + obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); + dev_priv = crtc->dev->dev_private; + wait_event(dev_priv->pending_flip_queue, + atomic_read(&obj_priv->pending_flip) == 0); +} + +static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; - int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; - int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; - int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; - int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; - int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; - int cpu_vsync_reg = (pipe == 0) ? 
VSYNC_A : VSYNC_B; - int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B; - int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B; - int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B; - int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B; - int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; - int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; - int trans_dpll_sel = (pipe == 0) ? 0 : 1; - u32 temp; - u32 pipe_bpc; + u32 reg, temp; - temp = I915_READ(pipeconf_reg); - pipe_bpc = temp & PIPE_BPC_MASK; + if (intel_crtc->active) + return; + + intel_crtc->active = true; + intel_update_watermarks(dev); + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + temp = I915_READ(PCH_LVDS); + if ((temp & LVDS_PORT_EN) == 0) + I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); + } + + ironlake_fdi_enable(crtc); + + /* Enable panel fitting for LVDS */ + if (dev_priv->pch_pf_size && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { + /* Force use of hard-coded filter coefficients + * as some pre-programmed values are broken, + * e.g. x201. + */ + I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, + PF_ENABLE | PF_FILTER_MED_3x3); + I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, + dev_priv->pch_pf_pos); + I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, + dev_priv->pch_pf_size); + } + + /* Enable CPU pipe */ + reg = PIPECONF(pipe); + temp = I915_READ(reg); + if ((temp & PIPECONF_ENABLE) == 0) { + I915_WRITE(reg, temp | PIPECONF_ENABLE); + POSTING_READ(reg); + intel_wait_for_vblank(dev, intel_crtc->pipe); + } + + /* configure and enable CPU plane */ + reg = DSPCNTR(plane); + temp = I915_READ(reg); + if ((temp & DISPLAY_PLANE_ENABLE) == 0) { + I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev, plane); + } + + /* For PCH output, training FDI link */ + if (IS_GEN6(dev)) + gen6_fdi_link_train(crtc); + else + ironlake_fdi_link_train(crtc); + + /* enable PCH DPLL */ + reg = PCH_DPLL(pipe); + temp = I915_READ(reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(reg, temp | DPLL_VCO_ENABLE); + POSTING_READ(reg); + udelay(200); + } + + if (HAS_PCH_CPT(dev)) { + /* Be sure PCH DPLL SEL is set */ + temp = I915_READ(PCH_DPLL_SEL); + if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0) + temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); + else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0) + temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + I915_WRITE(PCH_DPLL_SEL, temp); + } + + /* set transcoder timing */ + I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); + I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); + I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); + + I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); + I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); + I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); + + /* For PCH DP, enable TRANS_DP_CTL */ + if (HAS_PCH_CPT(dev) && + intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + reg = TRANS_DP_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK); + temp |= (TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_ENH_FRAMING); + + if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) + temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; + + switch (intel_trans_dp_port_sel(crtc)) { + case PCH_DP_B: + temp |= TRANS_DP_PORT_SEL_B; + break; + case PCH_DP_C: + temp |= TRANS_DP_PORT_SEL_C; + break; + case PCH_DP_D: + temp |= 
TRANS_DP_PORT_SEL_D; + break; + default: + DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); + temp |= TRANS_DP_PORT_SEL_B; + break; + } + + I915_WRITE(reg, temp); + } + + /* enable PCH transcoder */ + reg = TRANSCONF(pipe); + temp = I915_READ(reg); + /* + * make the BPC in transcoder be consistent with + * that in pipeconf reg. + */ + temp &= ~PIPE_BPC_MASK; + temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; + I915_WRITE(reg, temp | TRANS_ENABLE); + if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) + DRM_ERROR("failed to enable transcoder %d\n", pipe); + + intel_crtc_load_lut(crtc); + intel_update_fbc(dev); + intel_crtc_update_cursor(crtc, true); +} + +static void ironlake_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + u32 reg, temp; + + if (!intel_crtc->active) + return; + + intel_crtc_wait_for_pending_flips(crtc); + drm_vblank_off(dev, pipe); + intel_crtc_update_cursor(crtc, false); + + /* Disable display plane */ + reg = DSPCNTR(plane); + temp = I915_READ(reg); + if (temp & DISPLAY_PLANE_ENABLE) { + I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev, plane); + } + + if (dev_priv->cfb_plane == plane && + dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + + /* disable cpu pipe, disable after all planes disabled */ + reg = PIPECONF(pipe); + temp = I915_READ(reg); + if (temp & PIPECONF_ENABLE) { + I915_WRITE(reg, temp & ~PIPECONF_ENABLE); + POSTING_READ(reg); + /* wait for cpu pipe off, pipe state */ + intel_wait_for_pipe_off(dev, intel_crtc->pipe); + } + + /* Disable PF */ + I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); + I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, 0); + + /* disable CPU FDI tx and PCH FDI rx */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_ENABLE); + POSTING_READ(reg); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(0x7 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp & ~FDI_RX_ENABLE); + + POSTING_READ(reg); + udelay(100); + + /* Ironlake workaround, disable clock pointer after downing FDI */ + I915_WRITE(FDI_RX_CHICKEN(pipe), + I915_READ(FDI_RX_CHICKEN(pipe)) & + ~FDI_RX_PHASE_SYNC_POINTER_ENABLE); + + /* still set train pattern 1 */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + } + /* BPC in FDI rx is consistent with that in PIPECONF */ + temp &= ~(0x07 << 16); + temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; + I915_WRITE(reg, temp); + + POSTING_READ(reg); + udelay(100); + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + temp = I915_READ(PCH_LVDS); + if (temp & LVDS_PORT_EN) { + I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); + POSTING_READ(PCH_LVDS); + udelay(100); + } + } + + /* disable PCH transcoder */ + reg = TRANSCONF(pipe); + temp = I915_READ(reg); + if (temp & TRANS_ENABLE) { + I915_WRITE(reg, temp & ~TRANS_ENABLE); + /* wait for PCH transcoder off, transcoder state */ + if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) + DRM_ERROR("failed to disable transcoder\n"); + } + + if (HAS_PCH_CPT(dev)) { + /* disable TRANS_DP_CTL */ + reg = TRANS_DP_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); + I915_WRITE(reg, temp); + + /* disable DPLL_SEL */ + temp = I915_READ(PCH_DPLL_SEL); + if (pipe == 0) + temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); + else + temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + I915_WRITE(PCH_DPLL_SEL, temp); + } + + /* disable PCH DPLL */ + reg = PCH_DPLL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE); + + /* Switch from PCDclk to Rawclk */ + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_PCDCLK); + + /* Disable CPU FDI TX PLL */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); + + POSTING_READ(reg); + udelay(100); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); + + /* Wait for the clocks to turn off. */ + POSTING_READ(reg); + udelay(100); + + intel_crtc->active = false; + intel_update_watermarks(dev); + intel_update_fbc(dev); + intel_clear_scanline_wait(dev); +} + +static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
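Note on the recurring idiom in the enable/disable paths above: each register update computes the per-pipe offset once into reg, read-modify-writes it through temp, and then flushes the write with POSTING_READ() before any udelay(), so the delay starts only after the write has actually reached the hardware. A minimal sketch of that pattern as a standalone helper follows; the helper name is hypothetical and not part of this patch, while I915_READ, I915_WRITE and POSTING_READ are the driver's existing MMIO macros (which pick up dev_priv from the enclosing scope):

	/* Illustrative only, not part of this patch: read-modify-write an
	 * i915 MMIO register, then do a posting read so that a following
	 * udelay() measures time from when the write actually landed.
	 */
	static void i915_rmw_posting(struct drm_i915_private *dev_priv,
				     u32 reg, u32 clear, u32 set)
	{
		u32 temp = I915_READ(reg);

		temp &= ~clear;
		temp |= set;
		I915_WRITE(reg, temp);
		POSTING_READ(reg);
	}

Under that assumption, the FDI TX disable above is equivalent to i915_rmw_posting(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0) followed by udelay(100).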
@@ -1906,379 +2309,160 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - temp = I915_READ(PCH_LVDS); - if ((temp & LVDS_PORT_EN) == 0) { - I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); - POSTING_READ(PCH_LVDS); - } - } - - if (!HAS_eDP) { - - /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); - /* - * make the BPC in FDI Rx be consistent with that in - * pipeconf reg. - */ - temp &= ~(0x7 << 16); - temp |= (pipe_bpc << 11); - temp &= ~(7 << 19); - temp |= (intel_crtc->fdi_lanes - 1) << 19; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - I915_READ(fdi_rx_reg); - udelay(200); - - /* Switch from Rawclk to PCDclk */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); - I915_READ(fdi_rx_reg); - udelay(200); - - /* Enable CPU FDI TX PLL, always on for Ironlake */ - temp = I915_READ(fdi_tx_reg); - if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); - } - } - - /* Enable panel fitting for LVDS */ - if (dev_priv->pch_pf_size && - (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) - || HAS_eDP || intel_pch_has_edp(crtc))) { - /* Force use of hard-coded filter coefficients - * as some pre-programmed values are broken, - * e.g. x201. - */ - I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, - PF_ENABLE | PF_FILTER_MED_3x3); - I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS, - dev_priv->pch_pf_pos); - I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, - dev_priv->pch_pf_size); - } - - /* Enable CPU pipe */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) == 0) { - I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); - I915_READ(pipeconf_reg); - udelay(100); - } - - /* configure and enable CPU plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - } - - if (!HAS_eDP) { - /* For PCH output, training FDI link */ - if (IS_GEN6(dev)) - gen6_fdi_link_train(crtc); - else - ironlake_fdi_link_train(crtc); - - /* enable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - } - udelay(200); - - if (HAS_PCH_CPT(dev)) { - /* Be sure PCH DPLL SEL is set */ - temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0 && - (temp & TRANSA_DPLL_ENABLE) == 0) - temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); - else if (trans_dpll_sel == 1 && - (temp & TRANSB_DPLL_ENABLE) == 0) - temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); - I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); - } - - /* set transcoder timing */ - I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); - I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); - I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); - - I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); - I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); - I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); - - /* enable normal train */ - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_TX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - if (HAS_PCH_CPT(dev)) { - temp 
&= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_NORMAL_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE; - } - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_rx_reg); - - /* wait one idle pattern time */ - udelay(100); - - /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { - int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; - int reg; - - reg = I915_READ(trans_dp_ctl); - reg &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK); - reg |= (TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_ENH_FRAMING); - - if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) - reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; - if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) - reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; - - switch (intel_trans_dp_port_sel(crtc)) { - case PCH_DP_B: - reg |= TRANS_DP_PORT_SEL_B; - break; - case PCH_DP_C: - reg |= TRANS_DP_PORT_SEL_C; - break; - case PCH_DP_D: - reg |= TRANS_DP_PORT_SEL_D; - break; - default: - DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); - reg |= TRANS_DP_PORT_SEL_B; - break; - } - - I915_WRITE(trans_dp_ctl, reg); - POSTING_READ(trans_dp_ctl); - } - - /* enable PCH transcoder */ - temp = I915_READ(transconf_reg); - /* - * make the BPC in transcoder be consistent with - * that in pipeconf reg. - */ - temp &= ~PIPE_BPC_MASK; - temp |= pipe_bpc; - I915_WRITE(transconf_reg, temp | TRANS_ENABLE); - I915_READ(transconf_reg); - - if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1)) - DRM_ERROR("failed to enable transcoder\n"); - } - - intel_crtc_load_lut(crtc); - - intel_update_fbc(crtc, &crtc->mode); + ironlake_crtc_enable(crtc); break; case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); - - drm_vblank_off(dev, pipe); - /* Disable display plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - I915_READ(dspbase_reg); - } - - if (dev_priv->cfb_plane == plane && - dev_priv->display.disable_fbc) - dev_priv->display.disable_fbc(dev); - - /* disable cpu pipe, disable after all planes disabled */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) != 0) { - I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); - - /* wait for cpu pipe off, pipe state */ - if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1)) - DRM_ERROR("failed to turn off cpu pipe\n"); - } else - DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); - - udelay(100); - - /* Disable PF */ - I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0); - I915_WRITE(pipe ? 
PFB_WIN_SZ : PFA_WIN_SZ, 0); - - /* disable CPU FDI tx and PCH FDI rx */ - temp = I915_READ(fdi_tx_reg); - I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE); - I915_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - /* BPC in FDI rx is consistent with that in pipeconf */ - temp &= ~(0x07 << 16); - temp |= (pipe_bpc << 11); - I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); - - udelay(100); - - /* still set train pattern 1 */ - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); - POSTING_READ(fdi_tx_reg); - - temp = I915_READ(fdi_rx_reg); - if (HAS_PCH_CPT(dev)) { - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - } - I915_WRITE(fdi_rx_reg, temp); - POSTING_READ(fdi_rx_reg); - - udelay(100); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - temp = I915_READ(PCH_LVDS); - I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); - I915_READ(PCH_LVDS); - udelay(100); - } - - /* disable PCH transcoder */ - temp = I915_READ(transconf_reg); - if ((temp & TRANS_ENABLE) != 0) { - I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); - - /* wait for PCH transcoder off, transcoder state */ - if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1)) - DRM_ERROR("failed to disable transcoder\n"); - } - - temp = I915_READ(transconf_reg); - /* BPC in transcoder is consistent with that in pipeconf */ - temp &= ~PIPE_BPC_MASK; - temp |= pipe_bpc; - I915_WRITE(transconf_reg, temp); - I915_READ(transconf_reg); - udelay(100); - - if (HAS_PCH_CPT(dev)) { - /* disable TRANS_DP_CTL */ - int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B; - int reg; - - reg = I915_READ(trans_dp_ctl); - reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); - I915_WRITE(trans_dp_ctl, reg); - POSTING_READ(trans_dp_ctl); - - /* disable DPLL_SEL */ - temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0) - temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); - else - temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); - I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); - - } - - /* disable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - - /* Switch from PCDclk to Rawclk */ - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_SEL_PCDCLK; - I915_WRITE(fdi_rx_reg, temp); - I915_READ(fdi_rx_reg); - - /* Disable CPU FDI TX PLL */ - temp = I915_READ(fdi_tx_reg); - I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); - - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_RX_PLL_ENABLE; - I915_WRITE(fdi_rx_reg, temp); - I915_READ(fdi_rx_reg); - - /* Wait for the clocks to turn off. */ - udelay(100); + ironlake_crtc_disable(crtc); break; } } static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) { - struct intel_overlay *overlay; - int ret; - if (!enable && intel_crtc->overlay) { - overlay = intel_crtc->overlay; - mutex_lock(&overlay->dev->struct_mutex); - for (;;) { - ret = intel_overlay_switch_off(overlay); - if (ret == 0) - break; + struct drm_device *dev = intel_crtc->base.dev; - ret = intel_overlay_recover_from_interrupt(overlay, 0); - if (ret != 0) { - /* overlay doesn't react anymore. Usually - * results in a black screen and an unkillable - * X server. 
*/ - BUG(); - overlay->hw_wedged = HW_WEDGED; - break; - } - } - mutex_unlock(&overlay->dev->struct_mutex); + mutex_lock(&dev->struct_mutex); + (void) intel_overlay_switch_off(intel_crtc->overlay, false); + mutex_unlock(&dev->struct_mutex); } - /* Let userspace switch the overlay on again. In most cases userspace - * has to recompute where to put it anyway. */ - return; + /* Let userspace switch the overlay on again. In most cases userspace + * has to recompute where to put it anyway. + */ } -static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) +static void i9xx_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - u32 temp; + u32 reg, temp; + if (intel_crtc->active) + return; + + intel_crtc->active = true; + intel_update_watermarks(dev); + + /* Enable the DPLL */ + reg = DPLL(pipe); + temp = I915_READ(reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(reg, temp); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(reg); + udelay(150); + + I915_WRITE(reg, temp | DPLL_VCO_ENABLE); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(reg); + udelay(150); + + I915_WRITE(reg, temp | DPLL_VCO_ENABLE); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(reg); + udelay(150); + } + + /* Enable the pipe */ + reg = PIPECONF(pipe); + temp = I915_READ(reg); + if ((temp & PIPECONF_ENABLE) == 0) + I915_WRITE(reg, temp | PIPECONF_ENABLE); + + /* Enable the plane */ + reg = DSPCNTR(plane); + temp = I915_READ(reg); + if ((temp & DISPLAY_PLANE_ENABLE) == 0) { + I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev, plane); + } + + intel_crtc_load_lut(crtc); + intel_update_fbc(dev); + + /* Give the overlay scaler a chance to enable if it's on this pipe */ + intel_crtc_dpms_overlay(intel_crtc, true); + intel_crtc_update_cursor(crtc, true); +} + +static void i9xx_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + u32 reg, temp; + + if (!intel_crtc->active) + return; + + /* Give the overlay scaler a chance to disable if it's on this pipe */ + intel_crtc_wait_for_pending_flips(crtc); + drm_vblank_off(dev, pipe); + intel_crtc_dpms_overlay(intel_crtc, false); + intel_crtc_update_cursor(crtc, false); + + if (dev_priv->cfb_plane == plane && + dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + + /* Disable display plane */ + reg = DSPCNTR(plane); + temp = I915_READ(reg); + if (temp & DISPLAY_PLANE_ENABLE) { + I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE); + /* Flush the plane changes */ + intel_flush_display_plane(dev, plane); + + /* Wait for vblank for the disable to take effect */ + if (IS_GEN2(dev)) + intel_wait_for_vblank(dev, pipe); + } + + /* Don't disable pipe A or pipe A PLLs if needed */ + if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + goto done; + + /* Next, disable display pipes */ + reg = PIPECONF(pipe); + temp = I915_READ(reg); + if (temp & PIPECONF_ENABLE) { + I915_WRITE(reg, temp & ~PIPECONF_ENABLE); + + /* Wait for 
the pipe to turn off */ + POSTING_READ(reg); + intel_wait_for_pipe_off(dev, pipe); + } + + reg = DPLL(pipe); + temp = I915_READ(reg); + if (temp & DPLL_VCO_ENABLE) { + I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE); + + /* Wait for the clocks to turn off. */ + POSTING_READ(reg); + udelay(150); + } + +done: + intel_crtc->active = false; + intel_update_fbc(dev); + intel_update_watermarks(dev); + intel_clear_scanline_wait(dev); +} + +static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) +{ /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. */ @@ -2286,88 +2470,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: - /* Enable the DPLL */ - temp = I915_READ(dpll_reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(dpll_reg, temp); - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. */ - udelay(150); - I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. */ - udelay(150); - I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. */ - udelay(150); - } - - /* Enable the pipe */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) == 0) - I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); - - /* Enable the plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) == 0) { - I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - } - - intel_crtc_load_lut(crtc); - - if ((IS_I965G(dev) || plane == 0)) - intel_update_fbc(crtc, &crtc->mode); - - /* Give the overlay scaler a chance to enable if it's on this pipe */ - intel_crtc_dpms_overlay(intel_crtc, true); - break; + i9xx_crtc_enable(crtc); + break; case DRM_MODE_DPMS_OFF: - /* Give the overlay scaler a chance to disable if it's on this pipe */ - intel_crtc_dpms_overlay(intel_crtc, false); - drm_vblank_off(dev, pipe); - - if (dev_priv->cfb_plane == plane && - dev_priv->display.disable_fbc) - dev_priv->display.disable_fbc(dev); - - /* Disable display plane */ - temp = I915_READ(dspcntr_reg); - if ((temp & DISPLAY_PLANE_ENABLE) != 0) { - I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); - /* Flush the plane changes */ - I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); - I915_READ(dspbase_reg); - } - - /* Don't disable pipe A or pipe A PLLs if needed */ - if (pipeconf_reg == PIPEACONF && - (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { - /* Wait for vblank for the disable to take effect */ - intel_wait_for_vblank(dev, pipe); - goto skip_pipe_off; - } - - /* Next, disable display pipes */ - temp = I915_READ(pipeconf_reg); - if ((temp & PIPEACONF_ENABLE) != 0) { - I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); - I915_READ(pipeconf_reg); - } - - /* Wait for the pipe to turn off */ - intel_wait_for_pipe_off(dev, pipe); - - temp = I915_READ(dpll_reg); - if ((temp & DPLL_VCO_ENABLE) != 0) { - I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); - I915_READ(dpll_reg); - } - skip_pipe_off: - /* Wait for the clocks to turn off. 
*/ - udelay(150); + i9xx_crtc_disable(crtc); break; } } @@ -2388,26 +2494,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) return; intel_crtc->dpms_mode = mode; - intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; - - /* When switching on the display, ensure that SR is disabled - * with multiple pipes prior to enabling to new pipe. - * - * When switching off the display, make sure the cursor is - * properly hidden prior to disabling the pipe. - */ - if (mode == DRM_MODE_DPMS_ON) - intel_update_watermarks(dev); - else - intel_crtc_update_cursor(crtc); dev_priv->display.dpms(crtc, mode); - if (mode == DRM_MODE_DPMS_ON) - intel_crtc_update_cursor(crtc); - else - intel_update_watermarks(dev); - if (!dev->primary->master) return; @@ -2432,16 +2521,46 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) } } -static void intel_crtc_prepare (struct drm_crtc *crtc) +static void intel_crtc_disable(struct drm_crtc *crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + struct drm_device *dev = crtc->dev; + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + if (crtc->fb) { + mutex_lock(&dev->struct_mutex); + i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); + mutex_unlock(&dev->struct_mutex); + } } -static void intel_crtc_commit (struct drm_crtc *crtc) +/* Prepare for a mode set. + * + * Note we could be a lot smarter here. We need to figure out which outputs + * will be enabled, which disabled (in short, how the config will change) + * and perform the minimum necessary steps to accomplish that, e.g. updating + * watermarks, FBC configuration, making sure PLLs are programmed correctly, + * panel fitting is in the proper state, etc. + */ +static void i9xx_crtc_prepare(struct drm_crtc *crtc) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); + i9xx_crtc_disable(crtc); +} + +static void i9xx_crtc_commit(struct drm_crtc *crtc) +{ + i9xx_crtc_enable(crtc); +} + +static void ironlake_crtc_prepare(struct drm_crtc *crtc) +{ + ironlake_crtc_disable(crtc); +} + +static void ironlake_crtc_commit(struct drm_crtc *crtc) +{ + ironlake_crtc_enable(crtc); } void intel_encoder_prepare (struct drm_encoder *encoder) @@ -2460,13 +2579,7 @@ void intel_encoder_commit (struct drm_encoder *encoder) void intel_encoder_destroy(struct drm_encoder *encoder) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - - if (intel_encoder->ddc_bus) - intel_i2c_destroy(intel_encoder->ddc_bus); - - if (intel_encoder->i2c_bus) - intel_i2c_destroy(intel_encoder->i2c_bus); + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); drm_encoder_cleanup(encoder); kfree(intel_encoder); @@ -2557,33 +2670,6 @@ static int i830_get_display_clock_speed(struct drm_device *dev) return 133000; } -/** - * Return the pipe currently connected to the panel fitter, - * or -1 if the panel fitter is not present or not in use - */ -int intel_panel_fitter_pipe (struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 pfit_control; - - /* i830 doesn't have a panel fitter */ - if (IS_I830(dev)) - return -1; - - pfit_control = I915_READ(PFIT_CONTROL); - - /* See if the panel fitter is in use */ - if ((pfit_control & PFIT_ENABLE) == 0) - return -1; - - /* 965 can place panel fitter on either pipe */ - if (IS_I965G(dev)) - return (pfit_control >> 29) & 0x3; - - /* older chips can only use pipe 1 */ - return 1; -} - struct fdi_m_n { u32 tu; u32 gmch_m; @@ -2902,7 +2988,7 @@ static int 
i9xx_get_fifo_size(struct drm_device *dev, int plane) size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); + plane ? "B" : "A", size); return size; } @@ -2919,7 +3005,7 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) size >>= 1; /* Convert to cachelines */ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); + plane ? "B" : "A", size); return size; } @@ -2934,8 +3020,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) size >>= 2; /* Convert to cachelines */ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", - size); + plane ? "B" : "A", + size); return size; } @@ -2950,14 +3036,14 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) size >>= 1; /* Convert to cachelines */ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, - plane ? "B" : "A", size); + plane ? "B" : "A", size); return size; } static void pineview_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int unused, - int pixel_size) + int planeb_clock, int sr_hdisplay, int unused, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; const struct cxsr_latency *latency; @@ -3069,13 +3155,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, /* Use ns/us then divide to preserve precision */ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; + pixel_size * sr_hdisplay; sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); entries_required = (((sr_latency_ns / line_time_us) + 1000) / 1000) * pixel_size * 64; entries_required = DIV_ROUND_UP(entries_required, - g4x_cursor_wm_info.cacheline_size); + g4x_cursor_wm_info.cacheline_size); cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; if (cursor_sr > g4x_cursor_wm_info.max_wm) @@ -3087,7 +3173,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, } else { /* Turn off self refresh if both pipes are enabled */ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); + & ~FW_BLC_SELF_EN); } DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", @@ -3125,7 +3211,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, /* Use ns/us then divide to preserve precision */ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; + pixel_size * sr_hdisplay; sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); srwm = I965_FIFO_SIZE - sr_entries; @@ -3134,11 +3220,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, srwm &= 0x1ff; sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * 64; + pixel_size * 64; sr_entries = DIV_ROUND_UP(sr_entries, i965_cursor_wm_info.cacheline_size); cursor_sr = i965_cursor_wm_info.fifo_size - - (sr_entries + i965_cursor_wm_info.guard_size); + (sr_entries + i965_cursor_wm_info.guard_size); if (cursor_sr > i965_cursor_wm_info.max_wm) cursor_sr = i965_cursor_wm_info.max_wm; @@ -3146,11 +3232,11 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, DRM_DEBUG_KMS("self-refresh watermark: display plane %d " "cursor %d\n", srwm, cursor_sr); - if (IS_I965GM(dev)) + if (IS_CRESTLINE(dev)) I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); } else { /* Turn off self refresh if both pipes are enabled */ - if (IS_I965GM(dev)) + if (IS_CRESTLINE(dev)) 
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); } @@ -3180,9 +3266,9 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, int sr_clock, sr_entries = 0; /* Create copies of the base settings for each pipe */ - if (IS_I965GM(dev) || IS_I945GM(dev)) + if (IS_CRESTLINE(dev) || IS_I945GM(dev)) planea_params = planeb_params = i945_wm_info; - else if (IS_I9XX(dev)) + else if (!IS_GEN2(dev)) planea_params = planeb_params = i915_wm_info; else planea_params = planeb_params = i855_wm_info; @@ -3217,7 +3303,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, /* Use ns/us then divide to preserve precision */ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * sr_hdisplay; + pixel_size * sr_hdisplay; sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); srwm = total_size - sr_entries; @@ -3242,7 +3328,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", - planea_wm, planeb_wm, cwm, srwm); + planea_wm, planeb_wm, cwm, srwm); fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); fwater_hi = (cwm & 0x1f); @@ -3276,146 +3362,130 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, #define ILK_LP0_PLANE_LATENCY 700 #define ILK_LP0_CURSOR_LATENCY 1300 -static void ironlake_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int sr_htotal, - int pixel_size) +static bool ironlake_compute_wm0(struct drm_device *dev, + int pipe, + int *plane_wm, + int *cursor_wm) +{ + struct drm_crtc *crtc; + int htotal, hdisplay, clock, pixel_size = 0; + int line_time_us, line_count, entries; + + crtc = intel_get_crtc_for_pipe(dev, pipe); + if (crtc->fb == NULL || !crtc->enabled) + return false; + + htotal = crtc->mode.htotal; + hdisplay = crtc->mode.hdisplay; + clock = crtc->mode.clock; + pixel_size = crtc->fb->bits_per_pixel / 8; + + /* Use the small buffer method to calculate plane watermark */ + entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000; + entries = DIV_ROUND_UP(entries, + ironlake_display_wm_info.cacheline_size); + *plane_wm = entries + ironlake_display_wm_info.guard_size; + if (*plane_wm > (int)ironlake_display_wm_info.max_wm) + *plane_wm = ironlake_display_wm_info.max_wm; + + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = ((htotal * 1000) / clock); + line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; + entries = line_count * 64 * pixel_size; + entries = DIV_ROUND_UP(entries, + ironlake_cursor_wm_info.cacheline_size); + *cursor_wm = entries + ironlake_cursor_wm_info.guard_size; + if (*cursor_wm > ironlake_cursor_wm_info.max_wm) + *cursor_wm = ironlake_cursor_wm_info.max_wm; + + return true; +} + +static void ironlake_update_wm(struct drm_device *dev, + int planea_clock, int planeb_clock, + int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; - int planea_wm, planeb_wm, cursora_wm, cursorb_wm; - int sr_wm, cursor_wm; - unsigned long line_time_us; - int sr_clock, entries_required; - u32 reg_value; - int line_count; - int planea_htotal = 0, planeb_htotal = 0; - struct drm_crtc *crtc; + int plane_wm, cursor_wm, enabled; + int tmp; - /* Need htotal for all active display plane */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); - if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { - if (intel_crtc->plane == 0) - planea_htotal = crtc->mode.htotal; - else - planeb_htotal = crtc->mode.htotal; - } + enabled = 0; + if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEA_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe A -" + " plane %d, " "cursor: %d\n", + plane_wm, cursor_wm); + enabled++; } - /* Calculate and update the watermark for plane A */ - if (planea_clock) { - entries_required = ((planea_clock / 1000) * pixel_size * - ILK_LP0_PLANE_LATENCY) / 1000; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_wm_info.cacheline_size); - planea_wm = entries_required + - ironlake_display_wm_info.guard_size; - - if (planea_wm > (int)ironlake_display_wm_info.max_wm) - planea_wm = ironlake_display_wm_info.max_wm; - - /* Use the large buffer method to calculate cursor watermark */ - line_time_us = (planea_htotal * 1000) / planea_clock; - - /* Use ns/us then divide to preserve precision */ - line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; - - /* calculate the cursor watermark for cursor A */ - entries_required = line_count * 64 * pixel_size; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_cursor_wm_info.cacheline_size); - cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size; - if (cursora_wm > ironlake_cursor_wm_info.max_wm) - cursora_wm = ironlake_cursor_wm_info.max_wm; - - reg_value = I915_READ(WM0_PIPEA_ILK); - reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | - (cursora_wm & WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEA_ILK, reg_value); - DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, " - "cursor: %d\n", planea_wm, cursora_wm); - } - /* Calculate and update the watermark for plane B */ - if (planeb_clock) { - entries_required = ((planeb_clock / 1000) * pixel_size * - ILK_LP0_PLANE_LATENCY) / 1000; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_wm_info.cacheline_size); - planeb_wm = entries_required + - ironlake_display_wm_info.guard_size; - - if (planeb_wm > (int)ironlake_display_wm_info.max_wm) - planeb_wm = ironlake_display_wm_info.max_wm; - - /* Use the large buffer method to calculate cursor watermark */ - line_time_us = (planeb_htotal * 1000) / planeb_clock; - - /* Use ns/us then divide to preserve precision */ - line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; - - /* calculate the cursor watermark for cursor B */ - entries_required = line_count * 64 * pixel_size; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_cursor_wm_info.cacheline_size); - cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; - if (cursorb_wm > ironlake_cursor_wm_info.max_wm) - cursorb_wm = ironlake_cursor_wm_info.max_wm; - - reg_value = I915_READ(WM0_PIPEB_ILK); - reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); - reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | - (cursorb_wm & WM0_PIPE_CURSOR_MASK); - I915_WRITE(WM0_PIPEB_ILK, reg_value); - DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, " - "cursor: %d\n", planeb_wm, cursorb_wm); + if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEB_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks For pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled++; } /* * Calculate and update the self-refresh 
watermark only when one * display plane is used. */ - if (!planea_clock || !planeb_clock) { - + tmp = 0; + if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) { + unsigned long line_time_us; + int small, large, plane_fbc; + int sr_clock, entries; + int line_count, line_size; /* Read the self-refresh latency. The unit is 0.5us */ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_htotal * 1000) / sr_clock); + line_time_us = (sr_htotal * 1000) / sr_clock; /* Use ns/us then divide to preserve precision */ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) - / 1000; + / 1000; + line_size = sr_hdisplay * pixel_size; - /* calculate the self-refresh watermark for display plane */ - entries_required = line_count * sr_hdisplay * pixel_size; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_srwm_info.cacheline_size); - sr_wm = entries_required + - ironlake_display_srwm_info.guard_size; + /* Use the minimum of the small and large buffer method for primary */ + small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), + ironlake_display_srwm_info.cacheline_size); + + plane_fbc = entries * 64; + plane_fbc = DIV_ROUND_UP(plane_fbc, line_size); + + plane_wm = entries + ironlake_display_srwm_info.guard_size; + if (plane_wm > (int)ironlake_display_srwm_info.max_wm) + plane_wm = ironlake_display_srwm_info.max_wm; /* calculate the self-refresh watermark for display cursor */ - entries_required = line_count * pixel_size * 64; - entries_required = DIV_ROUND_UP(entries_required, - ironlake_cursor_srwm_info.cacheline_size); - cursor_wm = entries_required + - ironlake_cursor_srwm_info.guard_size; + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, + ironlake_cursor_srwm_info.cacheline_size); + + cursor_wm = entries + ironlake_cursor_srwm_info.guard_size; + if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm) + cursor_wm = ironlake_cursor_srwm_info.max_wm; /* configure watermark and enable self-refresh */ - reg_value = I915_READ(WM1_LP_ILK); - reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | - WM1_LP_CURSOR_MASK); - reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | - (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; - - I915_WRITE(WM1_LP_ILK, reg_value); - DRM_DEBUG_KMS("self-refresh watermark: display plane %d " - "cursor %d\n", sr_wm, cursor_wm); - - } else { - /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); + tmp = (WM1_LP_SR_EN | + (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | + (plane_fbc << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d," + " cursor %d\n", plane_wm, plane_fbc, cursor_wm); } + I915_WRITE(WM1_LP_ILK, tmp); + /* XXX setup WM2 and WM3 */ } + /** * intel_update_watermarks - update FIFO watermark values based on current modes * @@ -3447,7 +3517,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, * * We don't use the sprite, so we can ignore that. And on Crestline we have * to set the non-SR watermarks to 8. 
- */ + */ static void intel_update_watermarks(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3463,15 +3533,15 @@ static void intel_update_watermarks(struct drm_device *dev) /* Get the clock config from both planes */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) { + if (intel_crtc->active) { enabled++; if (intel_crtc->plane == 0) { DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n", - intel_crtc->pipe, crtc->mode.clock); + intel_crtc->pipe, crtc->mode.clock); planea_clock = crtc->mode.clock; } else { DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n", - intel_crtc->pipe, crtc->mode.clock); + intel_crtc->pipe, crtc->mode.clock); planeb_clock = crtc->mode.clock; } sr_hdisplay = crtc->mode.hdisplay; @@ -3502,62 +3572,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int fp_reg = (pipe == 0) ? FPA0 : FPB0; - int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; - int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD; - int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; - int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; - int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; - int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; - int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; - int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; - int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; - int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; - int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; - int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; - int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; + u32 fp_reg, dpll_reg; int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; - u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; + u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; struct intel_encoder *has_edp_encoder = NULL; struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; + struct intel_encoder *encoder; const intel_limit_t *limit; int ret; struct fdi_m_n m_n = {0}; - int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1; - int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1; - int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1; - int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1; - int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0; - int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; - int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; - int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; - int trans_dpll_sel = (pipe == 0) ? 
0 : 1; - int lvds_reg = LVDS; - u32 temp; - int sdvo_pixel_multiply; + u32 reg, temp; int target_clock; drm_vblank_pre_modeset(dev, pipe); - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_encoder *intel_encoder; - - if (encoder->crtc != crtc) + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + if (encoder->base.crtc != crtc) continue; - intel_encoder = enc_to_intel_encoder(encoder); - switch (intel_encoder->type) { + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_SDVO: case INTEL_OUTPUT_HDMI: is_sdvo = true; - if (intel_encoder->needs_tv_clock) + if (encoder->needs_tv_clock) is_tv = true; break; case INTEL_OUTPUT_DVO: @@ -3573,7 +3616,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, is_dp = true; break; case INTEL_OUTPUT_EDP: - has_edp_encoder = intel_encoder; + has_edp_encoder = encoder; break; } @@ -3583,15 +3626,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) { refclk = dev_priv->lvds_ssc_freq * 1000; DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", - refclk / 1000); - } else if (IS_I9XX(dev)) { + refclk / 1000); + } else if (!IS_GEN2(dev)) { refclk = 96000; - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(dev) && + (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base))) refclk = 120000; /* 120Mhz refclk */ } else { refclk = 48000; } - /* * Returns a set of divisors for the desired target clock with the given @@ -3607,13 +3650,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* Ensure that the cursor is valid for the new mode before changing... */ - intel_crtc_update_cursor(crtc); + intel_crtc_update_cursor(crtc, true); if (is_lvds && dev_priv->lvds_downclock_avail) { has_reduced_clock = limit->find_pll(limit, crtc, - dev_priv->lvds_downclock, - refclk, - &reduced_clock); + dev_priv->lvds_downclock, + refclk, + &reduced_clock); if (has_reduced_clock && (clock.p != reduced_clock.p)) { /* * If the different P is found, it means that we can't @@ -3622,7 +3665,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * feature. */ DRM_DEBUG_KMS("Different P is found for " - "LVDS clock/downclock\n"); + "LVDS clock/downclock\n"); has_reduced_clock = 0; } } @@ -3630,14 +3673,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, this mirrors vbios setting. 
*/ if (is_sdvo && is_tv) { if (adjusted_mode->clock >= 100000 - && adjusted_mode->clock < 140500) { + && adjusted_mode->clock < 140500) { clock.p1 = 2; clock.p2 = 10; clock.n = 3; clock.m1 = 16; clock.m2 = 8; } else if (adjusted_mode->clock >= 140500 - && adjusted_mode->clock <= 200000) { + && adjusted_mode->clock <= 200000) { clock.p1 = 1; clock.p2 = 10; clock.n = 6; @@ -3649,34 +3692,41 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* FDI link */ if (HAS_PCH_SPLIT(dev)) { int lane = 0, link_bw, bpp; - /* eDP doesn't require FDI link, so just set DP M/N + /* CPU eDP doesn't require FDI link, so just set DP M/N according to current link config */ - if (has_edp_encoder) { + if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { target_clock = mode->clock; intel_edp_link_config(has_edp_encoder, &lane, &link_bw); } else { - /* DP over FDI requires target mode clock + /* [e]DP over FDI requires target mode clock instead of link clock */ - if (is_dp) + if (is_dp || (has_edp_encoder && intel_encoder_is_pch_edp(&has_edp_encoder->base))) target_clock = mode->clock; else target_clock = adjusted_mode->clock; - link_bw = 270000; + + /* FDI is a binary signal running at ~2.7GHz, encoding + * each output octet as 10 bits. The actual frequency + * is stored as a divider into a 100MHz clock, and the + * mode pixel clock is stored in units of 1KHz. + * Hence the bw of each lane in terms of the mode signal + * is: + */ + link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; } /* determine panel color depth */ - temp = I915_READ(pipeconf_reg); + temp = I915_READ(PIPECONF(pipe)); temp &= ~PIPE_BPC_MASK; if (is_lvds) { - int lvds_reg = I915_READ(PCH_LVDS); /* the BPC will be 6 if it is 18-bit LVDS panel */ - if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) + if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) temp |= PIPE_8BPC; else temp |= PIPE_6BPC; - } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { - switch (dev_priv->edp_bpp/3) { + } else if (has_edp_encoder) { + switch (dev_priv->edp.bpp/3) { case 8: temp |= PIPE_8BPC; break; @@ -3692,8 +3742,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } else temp |= PIPE_8BPC; - I915_WRITE(pipeconf_reg, temp); - I915_READ(pipeconf_reg); + I915_WRITE(PIPECONF(pipe), temp); switch (temp & PIPE_BPC_MASK) { case PIPE_8BPC: @@ -3738,33 +3787,39 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Always enable nonspread source */ temp &= ~DREF_NONSPREAD_SOURCE_MASK; temp |= DREF_NONSPREAD_SOURCE_ENABLE; - I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); - temp &= ~DREF_SSC_SOURCE_MASK; temp |= DREF_SSC_SOURCE_ENABLE; I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); + POSTING_READ(PCH_DREF_CONTROL); udelay(200); if (has_edp_encoder) { if (dev_priv->lvds_use_ssc) { temp |= DREF_SSC1_ENABLE; I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); + POSTING_READ(PCH_DREF_CONTROL); udelay(200); - - temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); - } else { - temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); } + temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Enable CPU source on CPU attached eDP */ + if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (dev_priv->lvds_use_ssc) + temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + else + temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } else 
{ + /* Enable SSC on PCH eDP if needed */ + if (dev_priv->lvds_use_ssc) { + DRM_ERROR("enabling SSC on PCH\n"); + temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; + } + } + I915_WRITE(PCH_DREF_CONTROL, temp); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); } } @@ -3780,23 +3835,26 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, reduced_clock.m2; } + dpll = 0; if (!HAS_PCH_SPLIT(dev)) dpll = DPLL_VGA_MODE_DIS; - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { if (is_lvds) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; if (is_sdvo) { + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + if (pixel_multiplier > 1) { + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; + else if (HAS_PCH_SPLIT(dev)) + dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; + } dpll |= DPLL_DVO_HIGH_SPEED; - sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; - else if (HAS_PCH_SPLIT(dev)) - dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } - if (is_dp) + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) dpll |= DPLL_DVO_HIGH_SPEED; /* compute bitmask from p1 value */ @@ -3824,7 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); } else { if (is_lvds) { @@ -3851,7 +3909,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll |= PLL_REF_INPUT_DREFCLK; /* setup pipeconf */ - pipeconf = I915_READ(pipeconf_reg); + pipeconf = I915_READ(PIPECONF(pipe)); /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -3865,7 +3923,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dspcntr |= DISPPLANE_SEL_PIPE_B; } - if (pipe == 0 && !IS_I965G(dev)) { + if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { /* Enable pixel doubling when the dot clock is > 90% of the (display) * core speed. * @@ -3874,51 +3932,47 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, */ if (mode->clock > dev_priv->display.get_display_clock_speed(dev) * 9 / 10) - pipeconf |= PIPEACONF_DOUBLE_WIDE; + pipeconf |= PIPECONF_DOUBLE_WIDE; else - pipeconf &= ~PIPEACONF_DOUBLE_WIDE; + pipeconf &= ~PIPECONF_DOUBLE_WIDE; } dspcntr |= DISPLAY_PLANE_ENABLE; - pipeconf |= PIPEACONF_ENABLE; + pipeconf |= PIPECONF_ENABLE; dpll |= DPLL_VCO_ENABLE; - - /* Disable the panel fitter if it was on our pipe */ - if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) - I915_WRITE(PFIT_CONTROL, 0); - DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 
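The SDVO path above stops recomputing the multiplier from adjusted_mode->clock / mode->clock and instead takes it from intel_mode_get_pixel_multiplier(), but the register encoding is unchanged: the hardware wants the multiplier minus one at a generation-specific shift. A minimal sketch of that encoding (the helper name is mine):

static u32 dpll_encode_multiplier(u32 dpll, int multiplier, int shift)
{
	/* a multiplier of 1 programs nothing; m > 1 is stored as (m - 1) */
	if (multiplier > 1)
		dpll |= (u32)(multiplier - 1) << shift;
	return dpll;
}

So a doubled SDVO dot clock writes 1 into the field, whether the shift is SDVO_MULTIPLIER_SHIFT_HIRES (i945/G33) or PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT (PCH).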
'A' : 'B'); drm_mode_debug_printmodeline(mode); /* assign to Ironlake registers */ if (HAS_PCH_SPLIT(dev)) { - fp_reg = pch_fp_reg; - dpll_reg = pch_dpll_reg; + fp_reg = PCH_FP0(pipe); + dpll_reg = PCH_DPLL(pipe); + } else { + fp_reg = FP0(pipe); + dpll_reg = DPLL(pipe); } - if (!has_edp_encoder) { + /* PCH eDP needs FDI, but CPU eDP does not */ + if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); - I915_READ(dpll_reg); + + POSTING_READ(dpll_reg); udelay(150); } /* enable transcoder DPLL */ if (HAS_PCH_CPT(dev)) { temp = I915_READ(PCH_DPLL_SEL); - if (trans_dpll_sel == 0) - temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); + if (pipe == 0) + temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; else - temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); + temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; I915_WRITE(PCH_DPLL_SEL, temp); - I915_READ(PCH_DPLL_SEL); - udelay(150); - } - if (HAS_PCH_SPLIT(dev)) { - pipeconf &= ~PIPE_ENABLE_DITHER; - pipeconf &= ~PIPE_DITHER_TYPE_MASK; + POSTING_READ(PCH_DPLL_SEL); + udelay(150); } /* The LVDS pin pair needs to be on before the DPLLs are enabled. @@ -3926,58 +3980,60 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * things on. */ if (is_lvds) { - u32 lvds; - + reg = LVDS; if (HAS_PCH_SPLIT(dev)) - lvds_reg = PCH_LVDS; + reg = PCH_LVDS; - lvds = I915_READ(lvds_reg); - lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; + temp = I915_READ(reg); + temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; if (pipe == 1) { if (HAS_PCH_CPT(dev)) - lvds |= PORT_TRANS_B_SEL_CPT; + temp |= PORT_TRANS_B_SEL_CPT; else - lvds |= LVDS_PIPEB_SELECT; + temp |= LVDS_PIPEB_SELECT; } else { if (HAS_PCH_CPT(dev)) - lvds &= ~PORT_TRANS_SEL_MASK; + temp &= ~PORT_TRANS_SEL_MASK; else - lvds &= ~LVDS_PIPEB_SELECT; + temp &= ~LVDS_PIPEB_SELECT; } /* set the corresponsding LVDS_BORDER bit */ - lvds |= dev_priv->lvds_border_bits; + temp |= dev_priv->lvds_border_bits; /* Set the B0-B3 data pairs corresponding to whether we're going to * set the DPLLs for dual-channel mode or not. */ if (clock.p2 == 7) - lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; + temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; else - lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); + temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) * appropriately here, but we need to look more thoroughly into how * panels behave in the two modes. */ - /* set the dithering flag */ - if (IS_I965G(dev)) { - if (dev_priv->lvds_dither) { - if (HAS_PCH_SPLIT(dev)) { - pipeconf |= PIPE_ENABLE_DITHER; - pipeconf |= PIPE_DITHER_TYPE_ST01; - } else - lvds |= LVDS_ENABLE_DITHER; - } else { - if (!HAS_PCH_SPLIT(dev)) { - lvds &= ~LVDS_ENABLE_DITHER; - } - } + /* set the dithering flag on non-PCH LVDS as needed */ + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { + if (dev_priv->lvds_dither) + temp |= LVDS_ENABLE_DITHER; + else + temp &= ~LVDS_ENABLE_DITHER; } - I915_WRITE(lvds_reg, lvds); - I915_READ(lvds_reg); + I915_WRITE(reg, temp); } - if (is_dp) + + /* set the dithering flag and clear for anything other than a panel. 
*/ + if (HAS_PCH_SPLIT(dev)) { + pipeconf &= ~PIPECONF_DITHER_EN; + pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; + if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { + pipeconf |= PIPECONF_DITHER_EN; + pipeconf |= PIPECONF_DITHER_TYPE_ST1; + } + } + + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { intel_dp_set_m_n(crtc, mode, adjusted_mode); - else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev)) { /* For non-DP output, clear any trans DP clock recovery setting.*/ if (pipe == 0) { I915_WRITE(TRANSA_DATA_M1, 0); @@ -3992,29 +4048,35 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } - if (!has_edp_encoder) { + if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); - I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + POSTING_READ(dpll_reg); udelay(150); - if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { + temp = 0; if (is_sdvo) { - sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; - I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | - ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); - } else - I915_WRITE(dpll_md_reg, 0); + temp = intel_mode_get_pixel_multiplier(adjusted_mode); + if (temp > 1) + temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + else + temp = 0; + } + I915_WRITE(DPLL_MD(pipe), temp); } else { /* write it again -- the BIOS does, after all */ I915_WRITE(dpll_reg, dpll); } - I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + POSTING_READ(dpll_reg); udelay(150); } + intel_crtc->lowfreq_avail = false; if (is_lvds && has_reduced_clock && i915_powersave) { I915_WRITE(fp_reg + 4, fp2); intel_crtc->lowfreq_avail = true; @@ -4024,7 +4086,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } else { I915_WRITE(fp_reg + 4, fp); - intel_crtc->lowfreq_avail = false; if (HAS_PIPE_CXSR(dev)) { DRM_DEBUG_KMS("disabling CxSR downclocking\n"); pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; @@ -4043,70 +4104,62 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } else pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ - I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | + I915_WRITE(HTOTAL(pipe), + (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); - I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | + I915_WRITE(HBLANK(pipe), + (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); - I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | + I915_WRITE(HSYNC(pipe), + (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); - I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | + + I915_WRITE(VTOTAL(pipe), + (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); - I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | + I915_WRITE(VBLANK(pipe), + (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); - I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | + I915_WRITE(VSYNC(pipe), + (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); - /* pipesrc and dspsize control the size that is scaled from, which should - * always be the user's requested size. + + /* pipesrc and dspsize control the size that is scaled from, + * which should always be the user's requested size. 
*/ if (!HAS_PCH_SPLIT(dev)) { - I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | - (mode->hdisplay - 1)); - I915_WRITE(dsppos_reg, 0); + I915_WRITE(DSPSIZE(plane), + ((mode->vdisplay - 1) << 16) | + (mode->hdisplay - 1)); + I915_WRITE(DSPPOS(plane), 0); } - I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); + I915_WRITE(PIPESRC(pipe), + ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); - I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); - I915_WRITE(link_m1_reg, m_n.link_m); - I915_WRITE(link_n1_reg, m_n.link_n); + I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); + I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); + I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); + I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); - if (has_edp_encoder) { + if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { ironlake_set_pll_edp(crtc, adjusted_mode->clock); - } else { - /* enable FDI RX PLL too */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - I915_READ(fdi_rx_reg); - udelay(200); - - /* enable FDI TX PLL too */ - temp = I915_READ(fdi_tx_reg); - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - - /* enable FDI RX PCDCLK */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK); - I915_READ(fdi_rx_reg); - udelay(200); } } - I915_WRITE(pipeconf_reg, pipeconf); - I915_READ(pipeconf_reg); + I915_WRITE(PIPECONF(pipe), pipeconf); + POSTING_READ(PIPECONF(pipe)); intel_wait_for_vblank(dev, pipe); - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { /* enable address swizzle for tiling buffer */ temp = I915_READ(DISP_ARB_CTL); I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); } - I915_WRITE(dspcntr_reg, dspcntr); + I915_WRITE(DSPCNTR(plane), dspcntr); - /* Flush the plane changes */ ret = intel_pipe_set_base(crtc, x, y, old_fb); intel_update_watermarks(dev); @@ -4199,7 +4252,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) } /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... 
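All of the timing registers written above share one layout: two zero-based fields packed into a single word, with the active/start value in the low 16 bits and the total/end value in the high 16 bits. Sketch (helper name is mine):

static u32 pack_timing(u32 low_field, u32 high_field)
{
	/* both fields are stored minus one */
	return (low_field - 1) | ((high_field - 1) << 16);
}

/* e.g. a 1024-wide mode with an htotal of 1344:
 * I915_WRITE(HTOTAL(pipe), pack_timing(1024, 1344)); */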
*/ -static void intel_crtc_update_cursor(struct drm_crtc *crtc) +static void intel_crtc_update_cursor(struct drm_crtc *crtc, + bool on) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4212,7 +4266,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc) pos = 0; - if (intel_crtc->cursor_on && crtc->fb) { + if (on && crtc->enabled && crtc->fb) { base = intel_crtc->cursor_addr; if (x > (int) crtc->fb->width) base = 0; @@ -4324,7 +4378,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = obj_priv->phys_obj->handle->busaddr; } - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) I915_WRITE(CURSIZE, (height << 12) | width); finish: @@ -4344,7 +4398,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc->cursor_width = width; intel_crtc->cursor_height = height; - intel_crtc_update_cursor(crtc); + intel_crtc_update_cursor(crtc, true); return 0; fail_unpin: @@ -4363,7 +4417,7 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) intel_crtc->cursor_x = x; intel_crtc->cursor_y = y; - intel_crtc_update_cursor(crtc); + intel_crtc_update_cursor(crtc, true); return 0; } @@ -4432,7 +4486,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, struct intel_crtc *intel_crtc; struct drm_crtc *possible_crtc; struct drm_crtc *supported_crtc =NULL; - struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = NULL; struct drm_device *dev = encoder->dev; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; @@ -4513,7 +4567,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, int dpms_mode) { - struct drm_encoder *encoder = &intel_encoder->enc; + struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; @@ -4559,7 +4613,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; } - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { if (IS_PINEVIEW(dev)) clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); @@ -4663,8 +4717,6 @@ static void intel_gpu_idle_timer(unsigned long arg) struct drm_device *dev = (struct drm_device *)arg; drm_i915_private_t *dev_priv = dev->dev_private; - DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); - dev_priv->busy = false; queue_work(dev_priv->wq, &dev_priv->idle_work); @@ -4678,14 +4730,12 @@ static void intel_crtc_idle_timer(unsigned long arg) struct drm_crtc *crtc = &intel_crtc->base; drm_i915_private_t *dev_priv = crtc->dev->dev_private; - DRM_DEBUG_DRIVER("idle timer fired, downclocking\n"); - intel_crtc->busy = false; queue_work(dev_priv->wq, &dev_priv->idle_work); } -static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) +static void intel_increase_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -4720,9 +4770,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) } /* Schedule downclock */ - if (schedule) - mod_timer(&intel_crtc->idle_timer, jiffies + - msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); + mod_timer(&intel_crtc->idle_timer, jiffies + + 
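intel_crtc_clock_get(), reworked above, is the inverse of the mode set path: it reads FP and DPLL back and reconstructs the dot clock from the m/n/p divisors. For the i9xx-style PLL the relationship is the usual one, sketched here with the +2 offsets the register encoding implies (treat the helper and exact constants as illustrative):

static int i9xx_dot_clock_khz(int refclk_khz, int m1, int m2, int n,
			      int p1, int p2)
{
	int m = 5 * (m1 + 2) + (m2 + 2);
	int vco = refclk_khz * m / (n + 2);

	return vco / (p1 * p2);
}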
msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); } static void intel_decrease_pllclock(struct drm_crtc *crtc) @@ -4858,7 +4907,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); } /* Non-busy -> busy, upclock */ - intel_increase_pllclock(crtc, true); + intel_increase_pllclock(crtc); intel_crtc->busy = true; } else { /* Busy -> busy, put off timer */ @@ -4872,8 +4921,22 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct intel_unpin_work *work; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->unpin_work; + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (work) { + cancel_work_sync(&work->work); + kfree(work); + } drm_crtc_cleanup(crtc); + kfree(intel_crtc); } @@ -4928,12 +4991,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev, spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = to_intel_bo(work->pending_flip_obj); - - /* Initial scanout buffer will have a 0 pending flip count */ - if ((atomic_read(&obj_priv->pending_flip) == 0) || - atomic_dec_and_test(&obj_priv->pending_flip)) - DRM_WAKEUP(&dev_priv->pending_flip_queue); + obj_priv = to_intel_bo(work->old_fb_obj); + atomic_clear_mask(1 << intel_crtc->plane, + &obj_priv->pending_flip.counter); + if (atomic_read(&obj_priv->pending_flip) == 0) + wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); @@ -5014,7 +5076,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj); + ret = intel_pin_and_fence_fb_obj(dev, obj, true); if (ret) goto cleanup_work; @@ -5023,29 +5085,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, drm_gem_object_reference(obj); crtc->fb = fb; - ret = i915_gem_object_flush_write_domain(obj); - if (ret) - goto cleanup_objs; ret = drm_vblank_get(dev, intel_crtc->pipe); if (ret) goto cleanup_objs; - obj_priv = to_intel_bo(obj); - atomic_inc(&obj_priv->pending_flip); + /* Block clients from rendering to the new back buffer until + * the flip occurs and the object is no longer visible. + */ + atomic_add(1 << intel_crtc->plane, + &to_intel_bo(work->old_fb_obj)->pending_flip); + work->pending_flip_obj = obj; + obj_priv = to_intel_bo(obj); if (IS_GEN3(dev) || IS_GEN2(dev)) { u32 flip_mask; + /* Can't queue multiple flips, so wait for the previous + * one to finish before executing the next. 
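The flip-completion rework above replaces the old pending-flip counter with a per-plane bitmask on the outgoing framebuffer object: queuing a flip sets bit 'plane' on old_fb_obj, and the finish handler clears it and wakes any waiter once the mask drains to zero. An illustrative (non-driver) sketch of the invariant:

static unsigned int pending_flip;	/* one bit per plane */

static void queue_flip(int plane)
{
	pending_flip |= 1u << plane;	/* atomic_add(1 << plane, ...) above */
}

static void finish_flip(int plane)
{
	pending_flip &= ~(1u << plane);	/* atomic_clear_mask(...) above */
	if (pending_flip == 0)
		;	/* wake_up(&dev_priv->pending_flip_queue) */
}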
+ */ + BEGIN_LP_RING(2); if (intel_crtc->plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - - BEGIN_LP_RING(2); OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); - OUT_RING(0); + OUT_RING(MI_NOOP); ADVANCE_LP_RING(); } @@ -5126,15 +5192,14 @@ cleanup_work: return ret; } -static const struct drm_crtc_helper_funcs intel_helper_funcs = { +static struct drm_crtc_helper_funcs intel_helper_funcs = { .dpms = intel_crtc_dpms, .mode_fixup = intel_crtc_mode_fixup, .mode_set = intel_crtc_mode_set, .mode_set_base = intel_pipe_set_base, .mode_set_base_atomic = intel_pipe_set_base_atomic, - .prepare = intel_crtc_prepare, - .commit = intel_crtc_commit, .load_lut = intel_crtc_load_lut, + .disable = intel_crtc_disable, }; static const struct drm_crtc_funcs intel_crtc_funcs = { @@ -5160,8 +5225,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); - intel_crtc->pipe = pipe; - intel_crtc->plane = pipe; for (i = 0; i < 256; i++) { intel_crtc->lut_r[i] = i; intel_crtc->lut_g[i] = i; @@ -5171,9 +5234,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) /* Swap pipes & planes for FBC on pre-965 */ intel_crtc->pipe = pipe; intel_crtc->plane = pipe; - if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { + if (IS_MOBILE(dev) && IS_GEN3(dev)) { DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); - intel_crtc->plane = ((pipe == 0) ? 1 : 0); + intel_crtc->plane = !pipe; } BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || @@ -5183,6 +5246,16 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) intel_crtc->cursor_addr = 0; intel_crtc->dpms_mode = -1; + intel_crtc->active = true; /* force the pipe off on setup_init_config */ + + if (HAS_PCH_SPLIT(dev)) { + intel_helper_funcs.prepare = ironlake_crtc_prepare; + intel_helper_funcs.commit = ironlake_crtc_commit; + } else { + intel_helper_funcs.prepare = i9xx_crtc_prepare; + intel_helper_funcs.commit = i9xx_crtc_commit; + } + drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); intel_crtc->busy = false; @@ -5218,38 +5291,25 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, return 0; } -struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) -{ - struct drm_crtc *crtc = NULL; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (intel_crtc->pipe == pipe) - break; - } - return crtc; -} - static int intel_encoder_clones(struct drm_device *dev, int type_mask) { + struct intel_encoder *encoder; int index_mask = 0; - struct drm_encoder *encoder; int entry = 0; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - if (type_mask & intel_encoder->clone_mask) + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + if (type_mask & encoder->clone_mask) index_mask |= (1 << entry); entry++; } + return index_mask; } - static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_encoder *encoder; + struct intel_encoder *encoder; bool dpd_is_edp = false; if (IS_MOBILE(dev) && !IS_I830(dev)) @@ -5338,12 +5398,10 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_TV(dev)) intel_tv_init(dev); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct intel_encoder 
*intel_encoder = enc_to_intel_encoder(encoder); - - encoder->possible_crtcs = intel_encoder->crtc_mask; - encoder->possible_clones = intel_encoder_clones(dev, - intel_encoder->clone_mask); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + encoder->base.possible_crtcs = encoder->crtc_mask; + encoder->base.possible_clones = + intel_encoder_clones(dev, encoder->clone_mask); } } @@ -5377,8 +5435,25 @@ int intel_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *obj) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; + if (obj_priv->tiling_mode == I915_TILING_Y) + return -EINVAL; + + if (mode_cmd->pitch & 63) + return -EINVAL; + + switch (mode_cmd->bpp) { + case 8: + case 16: + case 24: + case 32: + break; + default: + return -EINVAL; + } + ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { DRM_ERROR("framebuffer init failed %d\n", ret); @@ -5487,6 +5562,10 @@ void ironlake_enable_drps(struct drm_device *dev) u32 rgvmodectl = I915_READ(MEMMODECTL); u8 fmax, fmin, fstart, vstart; + /* Enable temp reporting */ + I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); + I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); + /* 100ms RC evaluation intervals */ I915_WRITE(RCUPEI, 100000); I915_WRITE(RCDNEI, 100000); @@ -5529,7 +5608,7 @@ void ironlake_enable_drps(struct drm_device *dev) rgvmodectl |= MEMMODE_SWMODE_EN; I915_WRITE(MEMMODECTL, rgvmodectl); - if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0)) + if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) DRM_ERROR("stuck trying to change perf mode\n"); msleep(1); @@ -5660,7 +5739,7 @@ void intel_init_clock_gating(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) { uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { /* Required for FBC */ dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; /* Required for CxSR */ @@ -5673,6 +5752,13 @@ void intel_init_clock_gating(struct drm_device *dev) I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); + /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. 
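intel_framebuffer_init() above now validates a framebuffer before handing it to drm_framebuffer_init(), rejecting anything the display engine cannot scan out. The three rules it encodes, as a compact sketch (helper is illustrative):

static int scanout_ok(uint32_t pitch, uint32_t bpp, bool tiling_y)
{
	if (tiling_y)		/* Y-tiled scanout is not supported */
		return -EINVAL;
	if (pitch & 63)		/* the stride must be a multiple of 64 bytes */
		return -EINVAL;
	switch (bpp) {		/* only these depths can be displayed */
	case 8: case 16: case 24: case 32:
		return 0;
	default:
		return -EINVAL;
	}
}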
+ */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + /* * According to the spec the following bits should be set in * order to enable memory self-refresh @@ -5680,7 +5766,7 @@ void intel_init_clock_gating(struct drm_device *dev) * The bit 5 of 0x42020 * The bit 15 of 0x45000 */ - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { I915_WRITE(ILK_DISPLAY_CHICKEN2, (I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); @@ -5728,20 +5814,20 @@ void intel_init_clock_gating(struct drm_device *dev) if (IS_GM45(dev)) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, dspclk_gate); - } else if (IS_I965GM(dev)) { + } else if (IS_CRESTLINE(dev)) { I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); I915_WRITE(RENCLK_GATE_D2, 0); I915_WRITE(DSPCLK_GATE_D, 0); I915_WRITE(RAMCLK_GATE_D, 0); I915_WRITE16(DEUC, 0); - } else if (IS_I965G(dev)) { + } else if (IS_BROADWATER(dev)) { I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | I965_RCC_CLOCK_GATE_DISABLE | I965_RCPB_CLOCK_GATE_DISABLE | I965_ISC_CLOCK_GATE_DISABLE | I965_FBC_CLOCK_GATE_DISABLE); I915_WRITE(RENCLK_GATE_D2, 0); - } else if (IS_I9XX(dev)) { + } else if (IS_GEN3(dev)) { u32 dstate = I915_READ(D_STATE); dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | @@ -5823,7 +5909,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.fbc_enabled = g4x_fbc_enabled; dev_priv->display.enable_fbc = g4x_enable_fbc; dev_priv->display.disable_fbc = g4x_disable_fbc; - } else if (IS_I965GM(dev)) { + } else if (IS_CRESTLINE(dev)) { dev_priv->display.fbc_enabled = i8xx_fbc_enabled; dev_priv->display.enable_fbc = i8xx_enable_fbc; dev_priv->display.disable_fbc = i8xx_disable_fbc; @@ -5856,7 +5942,7 @@ static void intel_init_display(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) dev_priv->display.update_wm = ironlake_update_wm; else { @@ -5883,9 +5969,9 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_wm = pineview_update_wm; } else if (IS_G4X(dev)) dev_priv->display.update_wm = g4x_update_wm; - else if (IS_I965G(dev)) + else if (IS_GEN4(dev)) dev_priv->display.update_wm = i965_update_wm; - else if (IS_I9XX(dev)) { + else if (IS_GEN3(dev)) { dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; } else if (IS_I85X(dev)) { @@ -5999,24 +6085,24 @@ void intel_modeset_init(struct drm_device *dev) intel_init_display(dev); - if (IS_I965G(dev)) { - dev->mode_config.max_width = 8192; - dev->mode_config.max_height = 8192; - } else if (IS_I9XX(dev)) { + if (IS_GEN2(dev)) { + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + } else if (IS_GEN3(dev)) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else { - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; } /* set memory base */ - if (IS_I9XX(dev)) - dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); - else + if (IS_GEN2(dev)) dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); + else + dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2); - if (IS_MOBILE(dev) || IS_I9XX(dev)) + if (IS_MOBILE(dev) || !IS_GEN2(dev)) dev_priv->num_pipe = 2; else dev_priv->num_pipe = 1; @@ -6052,10 +6138,11 @@ void intel_modeset_cleanup(struct drm_device *dev) struct drm_crtc *crtc; 
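The framebuffer size limits in intel_modeset_init() above now read in ascending generation order rather than testing the newest hardware first. The decode is simply (helper is mine):

static int max_fb_dimension(int gen)
{
	switch (gen) {
	case 2: return 2048;
	case 3: return 4096;
	default: return 8192;	/* gen4 and newer */
	}
}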
struct intel_crtc *intel_crtc; + drm_kms_helper_poll_fini(dev); mutex_lock(&dev->struct_mutex); - drm_kms_helper_poll_fini(dev); - intel_fbdev_fini(dev); + intel_unregister_dsm_handler(); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ @@ -6063,12 +6150,9 @@ void intel_modeset_cleanup(struct drm_device *dev) continue; intel_crtc = to_intel_crtc(crtc); - intel_increase_pllclock(crtc, false); - del_timer_sync(&intel_crtc->idle_timer); + intel_increase_pllclock(crtc); } - del_timer_sync(&dev_priv->idle_timer); - if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); @@ -6097,33 +6181,36 @@ void intel_modeset_cleanup(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); + /* Disable the irq before mode object teardown, for the irq might + * enqueue unpin/hotplug work. */ + drm_irq_uninstall(dev); + cancel_work_sync(&dev_priv->hotplug_work); + + /* Shut off idle work before the crtcs get freed. */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + del_timer_sync(&intel_crtc->idle_timer); + } + del_timer_sync(&dev_priv->idle_timer); + cancel_work_sync(&dev_priv->idle_work); + drm_mode_config_cleanup(dev); } - /* * Return which encoder is currently attached for connector. */ -struct drm_encoder *intel_attached_encoder (struct drm_connector *connector) +struct drm_encoder *intel_best_encoder(struct drm_connector *connector) { - struct drm_mode_object *obj; - struct drm_encoder *encoder; - int i; + return &intel_attached_encoder(connector)->base; +} - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - obj = drm_mode_object_find(connector->dev, - connector->encoder_ids[i], - DRM_MODE_OBJECT_ENCODER); - if (!obj) - continue; - - encoder = obj_to_encoder(obj); - return encoder; - } - return NULL; +void intel_connector_attach_encoder(struct intel_connector *connector, + struct intel_encoder *encoder) +{ + connector->encoder = encoder; + drm_mode_connector_attach_encoder(&connector->base, + &encoder->base); } /* diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9ab8708ac6ba..891f4f1d63b1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -42,15 +42,13 @@ #define DP_LINK_CONFIGURATION_SIZE 9 -#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP) -#define IS_PCH_eDP(i) ((i)->is_pch_edp) - struct intel_dp { struct intel_encoder base; uint32_t output_reg; uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; + int force_audio; int dpms_mode; uint8_t link_bw; uint8_t lane_count; @@ -58,14 +56,69 @@ struct intel_dp { struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; bool is_pch_edp; + uint8_t train_set[4]; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + struct drm_property *force_audio_property; }; +/** + * is_edp - is the given port attached to an eDP panel (either CPU or PCH) + * @intel_dp: DP struct + * + * If a CPU or PCH DP output is attached to an eDP panel, this function + * will return true, and false otherwise. + */ +static bool is_edp(struct intel_dp *intel_dp) +{ + return intel_dp->base.type == INTEL_OUTPUT_EDP; +} + +/** + * is_pch_edp - is the port on the PCH and attached to an eDP panel? + * @intel_dp: DP struct + * + * Returns true if the given DP struct corresponds to a PCH DP port attached + * to an eDP panel, false otherwise. Helpful for determining whether we + * may need FDI resources for a given DP output or not. 
+ */ +static bool is_pch_edp(struct intel_dp *intel_dp) +{ + return intel_dp->is_pch_edp; +} + static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base); + return container_of(encoder, struct intel_dp, base.base); } -static void intel_dp_link_train(struct intel_dp *intel_dp); +static struct intel_dp *intel_attached_dp(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_dp, base); +} + +/** + * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? + * @encoder: DRM encoder + * + * Return true if @encoder corresponds to a PCH attached eDP panel. Needed + * by intel_display.c. + */ +bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) +{ + struct intel_dp *intel_dp; + + if (!encoder) + return false; + + intel_dp = enc_to_intel_dp(encoder); + + return is_pch_edp(intel_dp); +} + +static void intel_dp_start_link_train(struct intel_dp *intel_dp); +static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); void @@ -129,8 +182,8 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - return (pixel_clock * dev_priv->edp_bpp) / 8; + if (is_edp(intel_dp)) + return (pixel_clock * dev_priv->edp.bpp + 7) / 8; else return pixel_clock * 3; } @@ -145,15 +198,13 @@ static int intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); int max_lanes = intel_dp_max_lane_count(intel_dp); - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && - dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) return MODE_PANEL; @@ -163,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector, /* only refuse the mode on non eDP since we have seen some wierd eDP panels which are outside spec tolerances but somehow work by magic */ - if (!IS_eDP(intel_dp) && + if (!is_edp(intel_dp) && (intel_dp_link_required(connector->dev, intel_dp, mode->clock) > intel_dp_max_data_rate(max_link_clock, max_lanes))) return MODE_CLOCK_HIGH; @@ -233,7 +284,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, uint8_t *recv, int recv_size) { uint32_t output_reg = intel_dp->output_reg; - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_data = ch_ctl + 4; @@ -246,8 +297,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the * hrawclk value and divide by 2 and use that + * + * Note that PCH attached eDP panels should use a 125MHz input + * clock divider. 
*/ - if (IS_eDP(intel_dp)) { + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { if (IS_GEN6(dev)) aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ else @@ -519,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && - dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); @@ -531,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, mode->clock = dev_priv->panel_fixed_mode->clock; } + /* Just use VBT values for eDP */ + if (is_edp(intel_dp)) { + intel_dp->lane_count = dev_priv->edp.lanes; + intel_dp->link_bw = dev_priv->edp.rate; + adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); + DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", + intel_dp->link_bw, intel_dp->lane_count, + adjusted_mode->clock); + return true; + } + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); @@ -549,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { - /* okay we failed just pick the highest */ - intel_dp->lane_count = max_lane_count; - intel_dp->link_bw = bws[max_clock]; - adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); - DRM_DEBUG_KMS("Force picking display port link bw %02x lane " - "count %d clock %d\n", - intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock); - - return true; - } - return false; } @@ -598,25 +649,6 @@ intel_dp_compute_m_n(int bpp, intel_reduce_ratio(&m_n->link_m, &m_n->link_n); } -bool intel_pch_has_edp(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; - - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_dp *intel_dp; - - if (encoder->crtc != crtc) - continue; - - intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) - return intel_dp->is_pch_edp; - } - return false; -} - void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -641,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = intel_dp->lane_count; - if (IS_PCH_eDP(intel_dp)) - bpp = dev_priv->edp_bpp; + break; + } else if (is_edp(intel_dp)) { + lane_count = dev_priv->edp.lanes; + bpp = dev_priv->edp.bpp; break; } } @@ -698,7 +732,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, { struct drm_device *dev = encoder->dev; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_crtc *crtc = intel_dp->base.enc.crtc; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); intel_dp->DP = (DP_VOLTAGE_0_4 | @@ -709,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 
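The AUX clock divider comment above reduces to simple arithmetic: the AUX engine wants to tick at roughly 2MHz, so the divider is the input clock expressed in 2MHz units. Sketch (helper name is mine):

static uint32_t aux_divider_for(uint32_t input_clock_khz)
{
	return input_clock_khz / 2000;	/* target ~2MHz */
}

/* aux_divider_for(400000) == 200, the SNB eDP value above */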
intel_dp->DP |= DP_SYNC_VS_HIGH; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; else intel_dp->DP |= DP_LINK_TRAIN_OFF; @@ -744,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) intel_dp->DP |= DP_PIPEB_SELECT; - if (IS_eDP(intel_dp)) { + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { /* don't miss out required setting for eDP */ intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) @@ -754,13 +788,15 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, } } -static void ironlake_edp_panel_on (struct drm_device *dev) +/* Returns true if the panel was already on when called */ +static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) { + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; + u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; if (I915_READ(PCH_PP_STATUS) & PP_ON) - return; + return true; pp = I915_READ(PCH_PP_CONTROL); @@ -771,21 +807,30 @@ static void ironlake_edp_panel_on (struct drm_device *dev) pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); - if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10)) + /* Ouch. We need to wait here for some panels, like Dell e6510 + * https://bugs.freedesktop.org/show_bug.cgi?id=29278i + */ + msleep(300); + + if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, + 5000)) DRM_ERROR("panel on wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); - pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + + return false; } static void ironlake_edp_panel_off (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; + u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | + PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; pp = I915_READ(PCH_PP_CONTROL); @@ -796,15 +841,20 @@ static void ironlake_edp_panel_off (struct drm_device *dev) pp &= ~POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); - if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10)) + if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) DRM_ERROR("panel off wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); - /* Make sure VDD is enabled so DP AUX will work */ - pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */ + pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + + /* Ouch. We need to wait here for some panels, like Dell e6510 + * https://bugs.freedesktop.org/show_bug.cgi?id=29278i + */ + msleep(300); } static void ironlake_edp_backlight_on (struct drm_device *dev) @@ -813,6 +863,13 @@ static void ironlake_edp_backlight_on (struct drm_device *dev) u32 pp; DRM_DEBUG_KMS("\n"); + /* + * If we enable the backlight right away following a panel power + * on, we may see slight flicker as the panel syncs with the eDP + * link. So delay a bit to make sure the image is solid before + * allowing it to appear. 
+ */ + msleep(300); pp = I915_READ(PCH_PP_CONTROL); pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); @@ -837,8 +894,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder) DRM_DEBUG_KMS("\n"); dpa_ctl = I915_READ(DP_A); - dpa_ctl &= ~DP_PLL_ENABLE; + dpa_ctl |= DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); + udelay(200); } static void ironlake_edp_pll_off(struct drm_encoder *encoder) @@ -848,8 +907,9 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder) u32 dpa_ctl; dpa_ctl = I915_READ(DP_A); - dpa_ctl |= DP_PLL_ENABLE; + dpa_ctl &= ~DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); udelay(200); } @@ -857,29 +917,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(intel_dp->output_reg); - if (IS_eDP(intel_dp)) { + if (is_edp(intel_dp)) { ironlake_edp_backlight_off(dev); - ironlake_edp_panel_on(dev); - ironlake_edp_pll_on(encoder); + ironlake_edp_panel_on(intel_dp); + if (!is_pch_edp(intel_dp)) + ironlake_edp_pll_on(encoder); + else + ironlake_edp_pll_off(encoder); } - if (dp_reg & DP_PORT_EN) - intel_dp_link_down(intel_dp); + intel_dp_link_down(intel_dp); } static void intel_dp_commit(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(intel_dp->output_reg); - if (!(dp_reg & DP_PORT_EN)) { - intel_dp_link_train(intel_dp); - } - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + intel_dp_start_link_train(intel_dp); + + if (is_edp(intel_dp)) + ironlake_edp_panel_on(intel_dp); + + intel_dp_complete_link_train(intel_dp); + + if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); } @@ -892,22 +954,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp)) ironlake_edp_backlight_off(dev); + intel_dp_link_down(intel_dp); + if (is_edp(intel_dp)) ironlake_edp_panel_off(dev); - } - if (dp_reg & DP_PORT_EN) - intel_dp_link_down(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) ironlake_edp_pll_off(encoder); } else { + if (is_edp(intel_dp)) + ironlake_edp_panel_on(intel_dp); if (!(dp_reg & DP_PORT_EN)) { - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - ironlake_edp_panel_on(dev); - intel_dp_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - ironlake_edp_backlight_on(dev); + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); } + if (is_edp(intel_dp)) + ironlake_edp_backlight_on(dev); } intel_dp->dpms_mode = mode; } @@ -917,14 +979,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) * link status information */ static bool -intel_dp_get_link_status(struct intel_dp *intel_dp, - uint8_t link_status[DP_LINK_STATUS_SIZE]) +intel_dp_get_link_status(struct intel_dp *intel_dp) { int ret; ret = intel_dp_aux_native_read(intel_dp, DP_LANE0_1_STATUS, - link_status, DP_LINK_STATUS_SIZE); + intel_dp->link_status, DP_LINK_STATUS_SIZE); if (ret != DP_LINK_STATUS_SIZE) return false; return true; @@ -999,18 +1060,15 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) } static void -intel_get_adjust_train(struct intel_dp *intel_dp, - uint8_t 
link_status[DP_LINK_STATUS_SIZE], - int lane_count, - uint8_t train_set[4]) +intel_get_adjust_train(struct intel_dp *intel_dp) { uint8_t v = 0; uint8_t p = 0; int lane; - for (lane = 0; lane < lane_count; lane++) { - uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); - uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); + for (lane = 0; lane < intel_dp->lane_count; lane++) { + uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); + uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); if (this_v > v) v = this_v; @@ -1025,15 +1083,25 @@ intel_get_adjust_train(struct intel_dp *intel_dp, p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; for (lane = 0; lane < 4; lane++) - train_set[lane] = v | p; + intel_dp->train_set[lane] = v | p; } static uint32_t -intel_dp_signal_levels(uint8_t train_set, int lane_count) +intel_dp_signal_levels(struct intel_dp *intel_dp) { - uint32_t signal_levels = 0; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t signal_levels = 0; + u8 train_set = intel_dp->train_set[0]; + u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; + u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; - switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + if (is_edp(intel_dp)) { + vswing = dev_priv->edp.vswing; + preemphasis = dev_priv->edp.preemphasis; + } + + switch (vswing) { case DP_TRAIN_VOLTAGE_SWING_400: default: signal_levels |= DP_VOLTAGE_0_4; @@ -1048,7 +1116,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) signal_levels |= DP_VOLTAGE_1_2; break; } - switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + switch (preemphasis) { case DP_TRAIN_PRE_EMPHASIS_0: default: signal_levels |= DP_PRE_EMPHASIS_0; @@ -1116,178 +1184,213 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count DP_LANE_CHANNEL_EQ_DONE|\ DP_LANE_SYMBOL_LOCKED) static bool -intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) +intel_channel_eq_ok(struct intel_dp *intel_dp) { uint8_t lane_align; uint8_t lane_status; int lane; - lane_align = intel_dp_link_status(link_status, + lane_align = intel_dp_link_status(intel_dp->link_status, DP_LANE_ALIGN_STATUS_UPDATED); if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) return false; - for (lane = 0; lane < lane_count; lane++) { - lane_status = intel_get_lane_status(link_status, lane); + for (lane = 0; lane < intel_dp->lane_count; lane++) { + lane_status = intel_get_lane_status(intel_dp->link_status, lane); if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) return false; } return true; } +static bool +intel_dp_aux_handshake_required(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (is_edp(intel_dp) && dev_priv->no_aux_handshake) + return false; + + return true; +} + static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, - uint8_t dp_train_pat, - uint8_t train_set[4]) + uint8_t dp_train_pat) { - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); + if (!intel_dp_aux_handshake_required(intel_dp)) + return true; + intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); ret = 
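intel_get_adjust_train(), now operating on fields cached in struct intel_dp, implements the DP adjust-request handshake: each lane reports the voltage swing and pre-emphasis it would like, the source drives every lane at the highest level requested by any lane, and the *_MAX_*_REACHED flags are ORed in once the ceiling is hit. The selection step, as a sketch (helper is mine):

static uint8_t highest_request(const uint8_t *per_lane, int lane_count)
{
	uint8_t max = 0;
	int lane;

	for (lane = 0; lane < lane_count; lane++)
		if (per_lane[lane] > max)
			max = per_lane[lane];
	return max;	/* caller clamps and sets the MAX_*_REACHED flag */
}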
intel_dp_aux_native_write(intel_dp, - DP_TRAINING_LANE0_SET, train_set, 4); + DP_TRAINING_LANE0_SET, + intel_dp->train_set, 4); if (ret != 4) return false; return true; } +/* Enable corresponding port and start training pattern 1 */ static void -intel_dp_link_train(struct intel_dp *intel_dp) +intel_dp_start_link_train(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint8_t train_set[4]; - uint8_t link_status[DP_LINK_STATUS_SIZE]; + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); int i; uint8_t voltage; bool clock_recovery = false; - bool channel_eq = false; int tries; u32 reg; uint32_t DP = intel_dp->DP; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); /* Enable output, wait for it to become active */ I915_WRITE(intel_dp->output_reg, intel_dp->DP); POSTING_READ(intel_dp->output_reg); intel_wait_for_vblank(dev, intel_crtc->pipe); - /* Write the link configuration data */ - intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, - intel_dp->link_configuration, - DP_LINK_CONFIGURATION_SIZE); + if (intel_dp_aux_handshake_required(intel_dp)) + /* Write the link configuration data */ + intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, + intel_dp->link_configuration, + DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) DP &= ~DP_LINK_TRAIN_MASK_CPT; else DP &= ~DP_LINK_TRAIN_MASK; - memset(train_set, 0, 4); + memset(intel_dp->train_set, 0, 4); voltage = 0xff; tries = 0; clock_recovery = false; for (;;) { - /* Use train_set[0] to set the voltage and pre emphasis values */ + /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; - if (IS_GEN6(dev) && IS_eDP(intel_dp)) { - signal_levels = intel_gen6_edp_signal_levels(train_set[0]); + if (IS_GEN6(dev) && is_edp(intel_dp)) { + signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_1_CPT; else reg = DP | DP_LINK_TRAIN_PAT_1; if (!intel_dp_set_link_train(intel_dp, reg, - DP_TRAINING_PATTERN_1, train_set)) + DP_TRAINING_PATTERN_1)) break; /* Set training pattern 1 */ - udelay(100); - if (!intel_dp_get_link_status(intel_dp, link_status)) + udelay(500); + if (!intel_dp_aux_handshake_required(intel_dp)) { break; + } else { + if (!intel_dp_get_link_status(intel_dp)) break; - if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { - clock_recovery = true; - break; + if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + clock_recovery = true; + break; + } + + /* Check to see if we've tried the max voltage */ + for (i = 0; i < intel_dp->lane_count; i++) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + if (i == intel_dp->lane_count) + break; + + /* Check to see if we've tried the same voltage 5 times */ + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) + break; + } else + tries = 0; + voltage = intel_dp->train_set[0] &
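The clock-recovery loop above keeps the two long-standing escape hatches: stop once every lane reports DP_TRAIN_MAX_SWING_REACHED, and stop after the same voltage swing has been tried five times. The retry bookkeeping, as an illustrative sketch:

/* returns true while another attempt at this voltage is allowed */
static bool may_retry_voltage(uint8_t cur, uint8_t *last, int *tries)
{
	if (cur == *last)
		return ++(*tries) < 5;
	*last = cur;
	*tries = 0;
	return true;
}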
DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); } - - /* Check to see if we've tried the max voltage */ - for (i = 0; i < intel_dp->lane_count; i++) - if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) - break; - if (i == intel_dp->lane_count) - break; - - /* Check to see if we've tried the same voltage 5 times */ - if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { - ++tries; - if (tries == 5) - break; - } else - tries = 0; - voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - - /* Compute new train_set as requested by target */ - intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); } + intel_dp->DP = DP; +} + +static void +intel_dp_complete_link_train(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + bool channel_eq = false; + int tries; + u32 reg; + uint32_t DP = intel_dp->DP; + /* channel equalization */ tries = 0; channel_eq = false; for (;;) { - /* Use train_set[0] to set the voltage and pre emphasis values */ + /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; - if (IS_GEN6(dev) && IS_eDP(intel_dp)) { - signal_levels = intel_gen6_edp_signal_levels(train_set[0]); + if (IS_GEN6(dev) && is_edp(intel_dp)) { + signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_2_CPT; else reg = DP | DP_LINK_TRAIN_PAT_2; /* channel eq pattern */ if (!intel_dp_set_link_train(intel_dp, reg, - DP_TRAINING_PATTERN_2, train_set)) + DP_TRAINING_PATTERN_2)) break; - udelay(400); - if (!intel_dp_get_link_status(intel_dp, link_status)) - break; + udelay(500); - if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) { - channel_eq = true; + if (!intel_dp_aux_handshake_required(intel_dp)) { break; + } else { + if (!intel_dp_get_link_status(intel_dp)) + break; + + if (intel_channel_eq_ok(intel_dp)) { + channel_eq = true; + break; + } + + /* Try 5 times */ + if (tries > 5) + break; + + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); + ++tries; } - - /* Try 5 times */ - if (tries > 5) - break; - - /* Compute new train_set as requested by target */ - intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set); - ++tries; } - - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_OFF_CPT; else reg = DP | DP_LINK_TRAIN_OFF; @@ -1301,32 +1404,31 @@ intel_dp_link_train(struct intel_dp *intel_dp) static void intel_dp_link_down(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; DRM_DEBUG_KMS("\n"); - if (IS_eDP(intel_dp)) { + if (is_edp(intel_dp)) { DP &= ~DP_PLL_ENABLE; I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); udelay(100); } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { DP &= 
~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); - POSTING_READ(intel_dp->output_reg); } else { DP &= ~DP_LINK_TRAIN_MASK; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); - POSTING_READ(intel_dp->output_reg); } + POSTING_READ(intel_dp->output_reg); - udelay(17000); + msleep(17); - if (IS_eDP(intel_dp)) + if (is_edp(intel_dp)) DP |= DP_LINK_TRAIN_OFF; I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); POSTING_READ(intel_dp->output_reg); @@ -1344,32 +1446,34 @@ intel_dp_link_down(struct intel_dp *intel_dp) static void intel_dp_check_link_status(struct intel_dp *intel_dp) { - uint8_t link_status[DP_LINK_STATUS_SIZE]; - - if (!intel_dp->base.enc.crtc) + if (!intel_dp->base.base.crtc) return; - if (!intel_dp_get_link_status(intel_dp, link_status)) { + if (!intel_dp_get_link_status(intel_dp)) { intel_dp_link_down(intel_dp); return; } - if (!intel_channel_eq_ok(link_status, intel_dp->lane_count)) - intel_dp_link_train(intel_dp); + if (!intel_channel_eq_ok(intel_dp)) { + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + } } static enum drm_connector_status -ironlake_dp_detect(struct drm_connector *connector) +ironlake_dp_detect(struct intel_dp *intel_dp) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum drm_connector_status status; + /* Can't disconnect eDP */ + if (is_edp(intel_dp)) + return connector_status_connected; + status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, - sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) - { + sizeof (intel_dp->dpcd)) + == sizeof(intel_dp->dpcd)) { if (intel_dp->dpcd[0] != 0) status = connector_status_connected; } @@ -1378,26 +1482,13 @@ ironlake_dp_detect(struct drm_connector *connector) return status; } -/** - * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. - * - * \return true if DP port is connected. - * \return false if DP port is disconnected. - */ static enum drm_connector_status -intel_dp_detect(struct drm_connector *connector, bool force) +g4x_dp_detect(struct intel_dp *intel_dp) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_device *dev = intel_dp->base.enc.dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t temp, bit; enum drm_connector_status status; - - intel_dp->has_audio = false; - - if (HAS_PCH_SPLIT(dev)) - return ironlake_dp_detect(connector); + uint32_t temp, bit; switch (intel_dp->output_reg) { case DP_B: @@ -1419,31 +1510,66 @@ intel_dp_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_dp, - 0x000, intel_dp->dpcd, + if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) { if (intel_dp->dpcd[0] != 0) status = connector_status_connected; } - return status; + + return status; +} + +/** + * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. + * + * \return true if DP port is connected. + * \return false if DP port is disconnected.
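Both detect paths above end with the same probe: read the start of the sink's DPCD over the AUX channel and treat a non-zero first byte (the DPCD revision) as proof that a sink is attached. Sketch (helper is mine):

static bool dp_sink_present(const uint8_t *dpcd, int bytes_read, int want)
{
	return bytes_read == want && dpcd[0] != 0;	/* dpcd[0] is the rev */
}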
+ */ +static enum drm_connector_status +intel_dp_detect(struct drm_connector *connector, bool force) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_device *dev = intel_dp->base.base.dev; + enum drm_connector_status status; + struct edid *edid = NULL; + + intel_dp->has_audio = false; + + if (HAS_PCH_SPLIT(dev)) + status = ironlake_dp_detect(intel_dp); + else + status = g4x_dp_detect(intel_dp); + if (status != connector_status_connected) + return status; + + if (intel_dp->force_audio) { + intel_dp->has_audio = intel_dp->force_audio > 0; + } else { + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + intel_dp->has_audio = drm_detect_monitor_audio(edid); + connector->display_info.raw_edid = NULL; + kfree(edid); + } + } + + return connector_status_connected; } static int intel_dp_get_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_device *dev = intel_dp->base.enc.dev; + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; /* We should parse the EDID data and find out if it has an audio sink */ - ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus); + ret = intel_ddc_get_modes(connector, &intel_dp->adapter); if (ret) { - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && - !dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { struct drm_display_mode *newmode; list_for_each_entry(newmode, &connector->probed_modes, head) { @@ -1459,7 +1585,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1470,6 +1596,46 @@ static int intel_dp_get_modes(struct drm_connector *connector) return 0; } +static int +intel_dp_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + int ret; + + ret = drm_connector_property_set_value(connector, property, val); + if (ret) + return ret; + + if (property == intel_dp->force_audio_property) { + if (val == intel_dp->force_audio) + return 0; + + intel_dp->force_audio = val; + + if (val > 0 && intel_dp->has_audio) + return 0; + if (val < 0 && !intel_dp->has_audio) + return 0; + + intel_dp->has_audio = val > 0; + goto done; + } + + return -EINVAL; + +done: + if (intel_dp->base.base.crtc) { + struct drm_crtc *crtc = intel_dp->base.base.crtc; + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); + } + + return 0; +} + static void intel_dp_destroy (struct drm_connector *connector) { @@ -1478,6 +1644,15 @@ intel_dp_destroy (struct drm_connector *connector) kfree(connector); } +static void intel_dp_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + i2c_del_adapter(&intel_dp->adapter); + drm_encoder_cleanup(encoder); + kfree(intel_dp); +} + static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { .dpms = intel_dp_dpms, .mode_fixup = intel_dp_mode_fixup, @@ -1490,20 +1665,21 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = 
drm_helper_connector_dpms, .detect = intel_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_dp_set_property, .destroy = intel_dp_destroy, }; static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { - .destroy = intel_encoder_destroy, + .destroy = intel_dp_encoder_destroy, }; -void +static void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); @@ -1554,6 +1730,20 @@ bool intel_dpd_is_edp(struct drm_device *dev) return false; } +static void +intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + intel_dp->force_audio_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); + if (intel_dp->force_audio_property) { + intel_dp->force_audio_property->values[0] = -1; + intel_dp->force_audio_property->values[1] = 1; + drm_connector_attach_property(connector, intel_dp->force_audio_property, 0); + } +} + void intel_dp_init(struct drm_device *dev, int output_reg) { @@ -1580,7 +1770,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) if (intel_dpd_is_edp(dev)) intel_dp->is_pch_edp = true; - if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { + if (output_reg == DP_A || is_pch_edp(intel_dp)) { type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; } else { @@ -1601,7 +1791,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) else if (output_reg == DP_D || output_reg == PCH_DP_D) intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); - if (IS_eDP(intel_dp)) + if (is_edp(intel_dp)) intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); intel_encoder->crtc_mask = (1 << 0) | (1 << 1); @@ -1612,12 +1802,11 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_dp->has_audio = false; intel_dp->dpms_mode = DRM_MODE_DPMS_ON; - drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); + intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); /* Set up the DDC bus. 
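+ * (For DP the bus is the AUX channel, registered as an i2c adapter by intel_dp_i2c_init() below.)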
*/ @@ -1647,10 +1836,29 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_dp_i2c_init(intel_dp, intel_connector, name); - intel_encoder->ddc_bus = &intel_dp->adapter; + /* Cache some DPCD data in the eDP case */ + if (is_edp(intel_dp)) { + int ret; + bool was_on; + + was_on = ironlake_edp_panel_on(intel_dp); + ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, + intel_dp->dpcd, + sizeof(intel_dp->dpcd)); + if (ret == sizeof(intel_dp->dpcd)) { + if (intel_dp->dpcd[0] >= 0x11) + dev_priv->no_aux_handshake = intel_dp->dpcd[3] & + DP_NO_AUX_HANDSHAKE_LINK_TRAINING; + } else { + DRM_ERROR("failed to retrieve link info\n"); + } + if (!was_on) + ironlake_edp_panel_off(dev); + } + intel_encoder->hot_plug = intel_dp_hot_plug; - if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp)) { /* initialize panel mode from VBT if available for eDP */ if (dev_priv->lfp_lvds_vbt_mode) { dev_priv->panel_fixed_mode = @@ -1662,6 +1870,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) } } + intel_dp_add_properties(intel_dp, connector); + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8828b3ac6414..9af9f86a8765 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -26,14 +26,12 @@ #define __INTEL_DRV_H__ #include -#include -#include #include "i915_drv.h" #include "drm_crtc.h" - #include "drm_crtc_helper.h" +#include "drm_fb_helper.h" -#define wait_for(COND, MS, W) ({ \ +#define _wait_for(COND, MS, W) ({ \ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ int ret__ = 0; \ while (! (COND)) { \ @@ -41,11 +39,24 @@ ret__ = -ETIMEDOUT; \ break; \ } \ - if (W) msleep(W); \ + if (W && !in_dbg_master()) msleep(W); \ } \ ret__; \ }) +#define wait_for(COND, MS) _wait_for(COND, MS, 1) +#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) + +#define MSLEEP(x) do { \ + if (in_dbg_master()) \ + mdelay(x); \ + else \ + msleep(x); \ +} while(0) + +#define KHz(x) (1000*x) +#define MHz(x) KHz(1000*x) + /* * Display related stuff */ @@ -96,24 +107,39 @@ #define INTEL_DVO_CHIP_TMDS 2 #define INTEL_DVO_CHIP_TVOUT 4 -struct intel_i2c_chan { - struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) 
*/ - u32 reg; /* GPIO reg */ - struct i2c_adapter adapter; - struct i2c_algo_bit_data algo; -}; +/* drm_display_mode->private_flags */ +#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) +#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) + +static inline void +intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, + int multiplier) +{ + mode->clock *= multiplier; + mode->private_flags |= multiplier; +} + +static inline int +intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) +{ + return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT; +} struct intel_framebuffer { struct drm_framebuffer base; struct drm_gem_object *obj; }; +struct intel_fbdev { + struct drm_fb_helper helper; + struct intel_framebuffer ifb; + struct list_head fbdev_list; + struct drm_display_mode *our_mode; +}; struct intel_encoder { - struct drm_encoder enc; + struct drm_encoder base; int type; - struct i2c_adapter *i2c_bus; - struct i2c_adapter *ddc_bus; bool load_detect_temp; bool needs_tv_clock; void (*hot_plug)(struct intel_encoder *); @@ -123,32 +149,7 @@ struct intel_encoder { struct intel_connector { struct drm_connector base; -}; - -struct intel_crtc; -struct intel_overlay { - struct drm_device *dev; - struct intel_crtc *crtc; - struct drm_i915_gem_object *vid_bo; - struct drm_i915_gem_object *old_vid_bo; - int active; - int pfit_active; - u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ - u32 color_key; - u32 brightness, contrast, saturation; - u32 old_xscale, old_yscale; - /* register access */ - u32 flip_addr; - struct drm_i915_gem_object *reg_bo; - void *virt_addr; - /* flip handling */ - uint32_t last_flip_req; - int hw_wedged; -#define HW_WEDGED 1 -#define NEEDS_WAIT_FOR_FLIP 2 -#define RELEASE_OLD_VID 3 -#define SWITCH_OFF_STAGE_1 4 -#define SWITCH_OFF_STAGE_2 5 + struct intel_encoder *encoder; }; struct intel_crtc { @@ -157,6 +158,7 @@ struct intel_crtc { enum plane plane; u8 lut_r[256], lut_g[256], lut_b[256]; int dpms_mode; + bool active; /* is the crtc on? independent of the dpms mode */ bool busy; /* is scanout buffer being updated frequently? 
*/ struct timer_list idle_timer; bool lowfreq_avail; @@ -168,14 +170,53 @@ struct intel_crtc { uint32_t cursor_addr; int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; - bool cursor_visible, cursor_on; + bool cursor_visible; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) #define to_intel_connector(x) container_of(x, struct intel_connector, base) -#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) +#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) +#define DIP_TYPE_AVI 0x82 +#define DIP_VERSION_AVI 0x2 +#define DIP_LEN_AVI 13 + +struct dip_infoframe { + uint8_t type; /* HB0 */ + uint8_t ver; /* HB1 */ + uint8_t len; /* HB2 - body len, not including checksum */ + uint8_t ecc; /* Header ECC */ + uint8_t checksum; /* PB0 */ + union { + struct { + /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ + uint8_t Y_A_B_S; + /* PB2 - C 7:6, M 5:4, R 3:0 */ + uint8_t C_M_R; + /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ + uint8_t ITC_EC_Q_SC; + /* PB4 - VIC 6:0 */ + uint8_t VIC; + /* PB5 - PR 3:0 */ + uint8_t PR; + /* PB6 to PB13 */ + uint16_t top_bar_end; + uint16_t bottom_bar_start; + uint16_t left_bar_end; + uint16_t right_bar_start; + } avi; + uint8_t payload[27]; + } __attribute__ ((packed)) body; +} __attribute__((packed)); + +static inline struct drm_crtc * +intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->pipe_to_crtc_mapping[pipe]; +} + struct intel_unpin_work { struct work_struct work; struct drm_device *dev; @@ -186,16 +227,12 @@ struct intel_unpin_work { bool enable_stall_check; }; -struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, - const char *name); -void intel_i2c_destroy(struct i2c_adapter *adapter); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); -extern bool intel_ddc_probe(struct intel_encoder *intel_encoder); -void intel_i2c_quirk_set(struct drm_device *dev, bool enable); -void intel_i2c_reset_gmbus(struct drm_device *dev); +extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); +void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); @@ -205,32 +242,41 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern bool intel_pch_has_edp(struct drm_crtc *crtc); extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config (struct intel_encoder *, int *, int *); +extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); - +/* intel_panel.c */ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); extern void intel_pch_panel_fitting(struct drm_device *dev, int fitting_mode, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); +extern u32 intel_panel_get_max_backlight(struct drm_device *dev); +extern u32 intel_panel_get_backlight(struct drm_device *dev); +extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); -extern int 
intel_panel_fitter_pipe (struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare (struct drm_encoder *encoder); extern void intel_encoder_commit (struct drm_encoder *encoder); extern void intel_encoder_destroy(struct drm_encoder *encoder); -extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); +static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) +{ + return to_intel_connector(connector)->encoder; +} + +extern void intel_connector_attach_encoder(struct intel_connector *connector, + struct intel_encoder *encoder); +extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); -extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); +extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, struct drm_connector *connector, struct drm_display_mode *mode, @@ -252,7 +298,8 @@ extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, - struct drm_gem_object *obj); + struct drm_gem_object *obj, + bool pipelined); extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, @@ -267,9 +314,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); extern void intel_setup_overlay(struct drm_device *dev); extern void intel_cleanup_overlay(struct drm_device *dev); -extern int intel_overlay_switch_off(struct intel_overlay *overlay); -extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - int interruptible); +extern int intel_overlay_switch_off(struct intel_overlay *overlay, + bool interruptible); extern int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int intel_overlay_attrs(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 7c9ec1472d46..ea373283c93b 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .name = "ch7017", .dvo_reg = DVOC, .slave_addr = 0x75, - .gpio = GPIOE, + .gpio = GMBUS_PORT_DPB, .dev_ops = &ch7017_ops, } }; @@ -88,7 +88,13 @@ struct intel_dvo { static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base); + return container_of(encoder, struct intel_dvo, base.base); +} + +static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_dvo, base); } static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) @@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) static int intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = 
intel_attached_dvo(connector); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector, bool force) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); - + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } static int intel_dvo_get_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; /* We should probably have an i2c driver get_modes function for those * devices which will have a fixed set of modes determined by the chip * (TV-out, for example), but for now with just TMDS and LVDS, * that's not the case. */ - intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus); + intel_ddc_get_modes(connector, + &dev_priv->gmbus[GMBUS_PORT_DPC].adapter); if (!list_empty(&connector->probed_modes)) return 1; @@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { .mode_valid = intel_dvo_mode_valid, .get_modes = intel_dvo_get_modes, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static void intel_dvo_enc_destroy(struct drm_encoder *encoder) @@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); struct drm_display_mode *mode = NULL; @@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector) struct drm_crtc *crtc; int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 
1 : 0; - crtc = intel_get_crtc_from_pipe(dev, pipe); + crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc) { mode = intel_crtc_mode_get(dev, crtc); if (mode) { @@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector) void intel_dvo_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_dvo *intel_dvo; struct intel_connector *intel_connector; - struct i2c_adapter *i2cbus = NULL; - int ret = 0; int i; int encoder_type = DRM_MODE_ENCODER_NONE; @@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev) } intel_encoder = &intel_dvo->base; - - /* Set up the DDC bus */ - intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); - if (!intel_encoder->ddc_bus) - goto free_intel; + drm_encoder_init(dev, &intel_encoder->base, + &intel_dvo_enc_funcs, encoder_type); /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { struct drm_connector *connector = &intel_connector->base; const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; + struct i2c_adapter *i2c; int gpio; /* Allow the I2C driver info to specify the GPIO to be used in @@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev) if (dvo->gpio != 0) gpio = dvo->gpio; else if (dvo->type == INTEL_DVO_CHIP_LVDS) - gpio = GPIOB; + gpio = GMBUS_PORT_SSC; else - gpio = GPIOE; + gpio = GMBUS_PORT_DPB; /* Set up the I2C bus necessary for the chip we're probing. * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ - if (i2cbus != NULL) - intel_i2c_destroy(i2cbus); - if (!(i2cbus = intel_i2c_create(dev, gpio, - gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { - continue; - } + i2c = &dev_priv->gmbus[gpio].adapter; intel_dvo->dev = *dvo; - ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus); - if (!ret) + if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) continue; intel_encoder->type = INTEL_OUTPUT_DVO; @@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_encoder_init(dev, &intel_encoder->enc, - &intel_dvo_enc_funcs, encoder_type); - drm_encoder_helper_add(&intel_encoder->enc, + drm_encoder_helper_add(&intel_encoder->base, &intel_dvo_helper_funcs); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); + intel_connector_attach_encoder(intel_connector, intel_encoder); if (dvo->type == INTEL_DVO_CHIP_LVDS) { /* For our LVDS chipsets, we should hopefully be able * to dig the fixed panel mode out of the BIOS data. @@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev) return; } - intel_i2c_destroy(intel_encoder->ddc_bus); - /* Didn't find a chip, so tear down. 
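+ * Only the encoder itself needs cleaning up now; the gmbus adapters are owned by the device and stay registered.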
*/ - if (i2cbus != NULL) - intel_i2c_destroy(i2cbus); -free_intel: + drm_encoder_cleanup(&intel_encoder->base); kfree(intel_dvo); kfree(intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index b61966c126d3..af2a1dddc28e 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -44,13 +44,6 @@ #include "i915_drm.h" #include "i915_drv.h" -struct intel_fbdev { - struct drm_fb_helper helper; - struct intel_framebuffer ifb; - struct list_head fbdev_list; - struct drm_display_mode *our_mode; -}; - static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, @@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, struct drm_gem_object *fbo = NULL; struct drm_i915_gem_object *obj_priv; struct device *device = &dev->pdev->dev; - int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; + int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0; /* we don't do packed 24bpp */ if (sizes->surface_bpp == 24) @@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev, mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, fbo); + /* Flush everything out, we'll be doing GTT only from now on */ + ret = intel_pin_and_fence_fb_obj(dev, fbo, false); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; } - /* Flush everything out, we'll be doing GTT only from now on */ - ret = i915_gem_object_set_to_gtt_domain(fbo, 1); - if (ret) { - DRM_ERROR("failed to bind fb: %d.\n", ret); - goto out_unpin; - } - info = framebuffer_alloc(0, device); if (!info) { ret = -ENOMEM; @@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, goto out_unpin; } info->apertures->ranges[0].base = dev->mode_config.fb_base; - if (IS_I9XX(dev)) + if (!IS_GEN2(dev)) info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2); else info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); @@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = { .fb_probe = intel_fb_find_or_create_single, }; -int intel_fbdev_destroy(struct drm_device *dev, - struct intel_fbdev *ifbdev) +static void intel_fbdev_destroy(struct drm_device *dev, + struct intel_fbdev *ifbdev) { struct fb_info *info; struct intel_framebuffer *ifb = &ifbdev->ifb; @@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev, drm_framebuffer_cleanup(&ifb->base); if (ifb->obj) { - drm_gem_object_unreference(ifb->obj); + drm_gem_object_unreference_unlocked(ifb->obj); ifb->obj = NULL; } - - return 0; } int intel_fbdev_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 926934a482ec..0d0273e7b029 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -40,12 +40,76 @@ struct intel_hdmi { struct intel_encoder base; u32 sdvox_reg; + int ddc_bus; bool has_hdmi_sink; + bool has_audio; + int force_audio; + struct drm_property *force_audio_property; }; static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base); + return container_of(encoder, struct intel_hdmi, base.base); +} + +static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_hdmi, base); +} + +void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) +{ + uint8_t *data = (uint8_t *)avi_if; + uint8_t sum = 0; + unsigned i; + + 
avi_if->checksum = 0; + avi_if->ecc = 0; + + for (i = 0; i < sizeof(*avi_if); i++) + sum += data[i]; + + avi_if->checksum = 0x100 - sum; +} + +static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) +{ + struct dip_infoframe avi_if = { + .type = DIP_TYPE_AVI, + .ver = DIP_VERSION_AVI, + .len = DIP_LEN_AVI, + }; + uint32_t *data = (uint32_t *)&avi_if; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 port; + unsigned i; + + if (!intel_hdmi->has_hdmi_sink) + return; + + /* XXX first guess at handling video port, is this correct? */ + if (intel_hdmi->sdvox_reg == SDVOB) + port = VIDEO_DIP_PORT_B; + else if (intel_hdmi->sdvox_reg == SDVOC) + port = VIDEO_DIP_PORT_C; + else + return; + + I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | + VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); + + intel_dip_infoframe_csum(&avi_if); + for (i = 0; i < sizeof(avi_if); i += 4) { + I915_WRITE(VIDEO_DIP_DATA, *data); + data++; + } + + I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | + VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | + VIDEO_DIP_ENABLE_AVI); } static void intel_hdmi_mode_set(struct drm_encoder *encoder, @@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) sdvox |= SDVO_HSYNC_ACTIVE_HIGH; - if (intel_hdmi->has_hdmi_sink) { + /* Required on CPT */ + if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) + sdvox |= HDMI_MODE_SELECT; + + if (intel_hdmi->has_audio) { sdvox |= SDVO_AUDIO_ENABLE; - if (HAS_PCH_CPT(dev)) - sdvox |= HDMI_MODE_SELECT; + sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; } if (intel_crtc->pipe == 1) { @@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, I915_WRITE(intel_hdmi->sdvox_reg, sdvox); POSTING_READ(intel_hdmi->sdvox_reg); + + intel_hdmi_set_avi_infoframe(encoder); } static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) @@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - struct edid *edid = NULL; + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct edid *edid; enum drm_connector_status status = connector_status_disconnected; intel_hdmi->has_hdmi_sink = false; - edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus); + intel_hdmi->has_audio = false; + edid = drm_get_edid(connector, + &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + intel_hdmi->has_audio = drm_detect_monitor_audio(edid); } connector->display_info.raw_edid = NULL; kfree(edid); } + if (status == connector_status_connected) { + if (intel_hdmi->force_audio) + intel_hdmi->has_audio = intel_hdmi->force_audio > 0; + } + return status; } static int intel_hdmi_get_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct drm_i915_private *dev_priv = connector->dev->dev_private; /* We should parse
the EDID data and find out if it's an HDMI sink so * we can send audio to it. */ - return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus); + return intel_ddc_get_modes(connector, + &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); +} + +static int +intel_hdmi_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + int ret; + + ret = drm_connector_property_set_value(connector, property, val); + if (ret) + return ret; + + if (property == intel_hdmi->force_audio_property) { + if (val == intel_hdmi->force_audio) + return 0; + + intel_hdmi->force_audio = val; + + if (val > 0 && intel_hdmi->has_audio) + return 0; + if (val < 0 && !intel_hdmi->has_audio) + return 0; + + intel_hdmi->has_audio = val > 0; + goto done; + } + + return -EINVAL; + +done: + if (intel_hdmi->base.base.crtc) { + struct drm_crtc *crtc = intel_hdmi->base.base.crtc; + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); + } + + return 0; } static void intel_hdmi_destroy(struct drm_connector *connector) @@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_hdmi_set_property, .destroy = intel_hdmi_destroy, }; static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { .destroy = intel_encoder_destroy, }; +static void +intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + intel_hdmi->force_audio_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); + if (intel_hdmi->force_audio_property) { + intel_hdmi->force_audio_property->values[0] = -1; + intel_hdmi->force_audio_property->values[1] = 1; + drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0); + } +} + void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) } intel_encoder = &intel_hdmi->base; + drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, + DRM_MODE_ENCODER_TMDS); + connector = &intel_connector->base; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); @@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) /* Set up the DDC bus. 
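+ * (GMBUS pin numbers now stand in for the raw GPIO registers; the shared adapters live in dev_priv->gmbus.)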
*/ if (sdvox_reg == SDVOB) { intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPB; dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == SDVOC) { intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPC; dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIB) { intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, - "HDMIB"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPB; dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMIC) { intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, - "HDMIC"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPC; dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; } else if (sdvox_reg == HDMID) { intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); - intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, - "HDMID"); + intel_hdmi->ddc_bus = GMBUS_PORT_DPD; dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; } - if (!intel_encoder->ddc_bus) - goto err_connector; intel_hdmi->sdvox_reg = sdvox_reg; - drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); - drm_mode_connector_attach_encoder(&intel_connector->base, - &intel_encoder->enc); + intel_hdmi_add_properties(intel_hdmi, connector); + + intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written @@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) u32 temp = I915_READ(PEG_BAND_GAP_DATA); I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } - - return; - -err_connector: - drm_connector_cleanup(connector); - kfree(intel_hdmi); - kfree(intel_connector); - - return; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index c2649c7df14c..2be4f728ed0c 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2006 Dave Airlie - * Copyright © 2006-2008 Intel Corporation + * Copyright © 2006-2008,2010 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a @@ -24,10 +24,9 @@ * * Authors: * Eric Anholt + * Chris Wilson */ #include -#include -#include #include #include "drmP.h" #include "drm.h" @@ -35,79 +34,106 @@ #include "i915_drm.h" #include "i915_drv.h" -void intel_i2c_quirk_set(struct drm_device *dev, bool enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - /* When using bit bashing for I2C, this bit needs to be set to 1 */ - if (!IS_PINEVIEW(dev)) - return; - if (enable) - I915_WRITE(DSPCLK_GATE_D, - I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE); - else - I915_WRITE(DSPCLK_GATE_D, - I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE)); -} - -/* - * Intel GPIO access functions - */ +/* Intel GPIO access functions */ #define I2C_RISEFALL_TIME 20 -static int get_clock(void *data) +static inline struct intel_gmbus * +to_intel_gmbus(struct i2c_adapter *i2c) +{ + return 
container_of(i2c, struct intel_gmbus, adapter); +} + +struct intel_gpio { + struct i2c_adapter adapter; + struct i2c_algo_bit_data algo; + struct drm_i915_private *dev_priv; + u32 reg; +}; + +void +intel_i2c_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + if (HAS_PCH_SPLIT(dev)) + I915_WRITE(PCH_GMBUS0, 0); + else + I915_WRITE(GMBUS0, 0); +} + +static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) { - struct intel_i2c_chan *chan = data; - struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; u32 val; - val = I915_READ(chan->reg); - return ((val & GPIO_CLOCK_VAL_IN) != 0); + /* When using bit bashing for I2C, this bit needs to be set to 1 */ + if (!IS_PINEVIEW(dev_priv->dev)) + return; + + val = I915_READ(DSPCLK_GATE_D); + if (enable) + val |= DPCUNIT_CLOCK_GATE_DISABLE; + else + val &= ~DPCUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); +} + +static u32 get_reserved(struct intel_gpio *gpio) +{ + struct drm_i915_private *dev_priv = gpio->dev_priv; + struct drm_device *dev = dev_priv->dev; + u32 reserved = 0; + + /* On most chips, these bits must be preserved in software. */ + if (!IS_I830(dev) && !IS_845G(dev)) + reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | + GPIO_CLOCK_PULLUP_DISABLE); + + return reserved; +} + +static int get_clock(void *data) +{ + struct intel_gpio *gpio = data; + struct drm_i915_private *dev_priv = gpio->dev_priv; + u32 reserved = get_reserved(gpio); + I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); + I915_WRITE(gpio->reg, reserved); + return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { - struct intel_i2c_chan *chan = data; - struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; - u32 val; - - val = I915_READ(chan->reg); - return ((val & GPIO_DATA_VAL_IN) != 0); + struct intel_gpio *gpio = data; + struct drm_i915_private *dev_priv = gpio->dev_priv; + u32 reserved = get_reserved(gpio); + I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); + I915_WRITE(gpio->reg, reserved); + return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) { - struct intel_i2c_chan *chan = data; - struct drm_device *dev = chan->drm_dev; - struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; - u32 reserved = 0, clock_bits; - - /* On most chips, these bits must be preserved in software. */ - if (!IS_I830(dev) && !IS_845G(dev)) - reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | - GPIO_CLOCK_PULLUP_DISABLE); + struct intel_gpio *gpio = data; + struct drm_i915_private *dev_priv = gpio->dev_priv; + u32 reserved = get_reserved(gpio); + u32 clock_bits; if (state_high) clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; else clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; - I915_WRITE(chan->reg, reserved | clock_bits); - udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ + + I915_WRITE(gpio->reg, reserved | clock_bits); + POSTING_READ(gpio->reg); } static void set_data(void *data, int state_high) { - struct intel_i2c_chan *chan = data; - struct drm_device *dev = chan->drm_dev; - struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; - u32 reserved = 0, data_bits; - - /* On most chips, these bits must be preserved in software. 
*/ - if (!IS_I830(dev) && !IS_845G(dev)) - reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE | - GPIO_CLOCK_PULLUP_DISABLE); + struct intel_gpio *gpio = data; + struct drm_i915_private *dev_priv = gpio->dev_priv; + u32 reserved = get_reserved(gpio); + u32 data_bits; if (state_high) data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; @@ -115,109 +141,313 @@ static void set_data(void *data, int state_high) data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; - I915_WRITE(chan->reg, reserved | data_bits); - udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ + I915_WRITE(gpio->reg, reserved | data_bits); + POSTING_READ(gpio->reg); } -/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C - * engine, but if the BIOS leaves it enabled, then that can break our use - * of the bit-banging I2C interfaces. This is notably the case with the - * Mac Mini in EFI mode. - */ -void -intel_i2c_reset_gmbus(struct drm_device *dev) +static struct i2c_adapter * +intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) { - struct drm_i915_private *dev_priv = dev->dev_private; + static const int map_pin_to_reg[] = { + 0, + GPIOB, + GPIOA, + GPIOC, + GPIOD, + GPIOE, + 0, + GPIOF, + }; + struct intel_gpio *gpio; - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_GMBUS0, 0); - } else { - I915_WRITE(GMBUS0, 0); - } -} + if (pin < 1 || pin > 7) + return NULL; -/** - * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg - * @dev: DRM device - * @output: driver specific output device - * @reg: GPIO reg to use - * @name: name for this bus - * @slave_addr: slave address (if fixed) - * - * Creates and registers a new i2c bus with the Linux i2c layer, for use - * in output probing and control (e.g. DDC or SDVO control functions). - * - * Possible values for @reg include: - * %GPIOA - * %GPIOB - * %GPIOC - * %GPIOD - * %GPIOE - * %GPIOF - * %GPIOG - * %GPIOH - * see PRM for details on how these different busses are used. 
- */ -struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, - const char *name) -{ - struct intel_i2c_chan *chan; + gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); + if (gpio == NULL) + return NULL; - chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL); - if (!chan) + gpio->reg = map_pin_to_reg[pin]; + if (HAS_PCH_SPLIT(dev_priv->dev)) + gpio->reg += PCH_GPIOA - GPIOA; + gpio->dev_priv = dev_priv; + + snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]); + gpio->adapter.owner = THIS_MODULE; + gpio->adapter.algo_data = &gpio->algo; + gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; + gpio->algo.setsda = set_data; + gpio->algo.setscl = set_clock; + gpio->algo.getsda = get_data; + gpio->algo.getscl = get_clock; + gpio->algo.udelay = I2C_RISEFALL_TIME; + gpio->algo.timeout = usecs_to_jiffies(2200); + gpio->algo.data = gpio; + + if (i2c_bit_add_bus(&gpio->adapter)) goto out_free; - chan->drm_dev = dev; - chan->reg = reg; - snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); - chan->adapter.owner = THIS_MODULE; - chan->adapter.algo_data = &chan->algo; - chan->adapter.dev.parent = &dev->pdev->dev; - chan->algo.setsda = set_data; - chan->algo.setscl = set_clock; - chan->algo.getsda = get_data; - chan->algo.getscl = get_clock; - chan->algo.udelay = 20; - chan->algo.timeout = usecs_to_jiffies(2200); - chan->algo.data = chan; - - i2c_set_adapdata(&chan->adapter, chan); - - if(i2c_bit_add_bus(&chan->adapter)) - goto out_free; - - intel_i2c_reset_gmbus(dev); - - /* JJJ: raise SCL and SDA? */ - intel_i2c_quirk_set(dev, true); - set_data(chan, 1); - set_clock(chan, 1); - intel_i2c_quirk_set(dev, false); - udelay(20); - - return &chan->adapter; + return &gpio->adapter; out_free: - kfree(chan); + kfree(gpio); return NULL; } -/** - * intel_i2c_destroy - unregister and free i2c bus resources - * @output: channel to free - * - * Unregister the adapter from the i2c layer, then free the structure. - */ -void intel_i2c_destroy(struct i2c_adapter *adapter) +static int +intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv, + struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) { - struct intel_i2c_chan *chan; + struct intel_gpio *gpio = container_of(adapter, + struct intel_gpio, + adapter); + int ret; - if (!adapter) + intel_i2c_reset(dev_priv->dev); + + intel_i2c_quirk_set(dev_priv, true); + set_data(gpio, 1); + set_clock(gpio, 1); + udelay(I2C_RISEFALL_TIME); + + ret = adapter->algo->master_xfer(adapter, msgs, num); + + set_data(gpio, 1); + set_clock(gpio, 1); + intel_i2c_quirk_set(dev_priv, false); + + return ret; +} + +static int +gmbus_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = adapter->algo_data; + int i, reg_offset; + + if (bus->force_bit) + return intel_i2c_quirk_xfer(dev_priv, + bus->force_bit, msgs, num); + + reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; + + I915_WRITE(GMBUS0 + reg_offset, bus->reg0); + + for (i = 0; i < num; i++) { + u16 len = msgs[i].len; + u8 *buf = msgs[i].buf; + + if (msgs[i].flags & I2C_M_RD) { + I915_WRITE(GMBUS1 + reg_offset, + GMBUS_CYCLE_WAIT | (i + 1 == num ? 
GMBUS_CYCLE_STOP : 0) | + (len << GMBUS_BYTE_COUNT_SHIFT) | + (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_READ | GMBUS_SW_RDY); + POSTING_READ(GMBUS2+reg_offset); + do { + u32 val, loop = 0; + + if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) + goto timeout; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return 0; + + val = I915_READ(GMBUS3 + reg_offset); + do { + *buf++ = val & 0xff; + val >>= 8; + } while (--len && ++loop < 4); + } while (len); + } else { + u32 val, loop; + + val = loop = 0; + do { + val |= *buf++ << (8 * loop); + } while (--len && ++loop < 4); + + I915_WRITE(GMBUS3 + reg_offset, val); + I915_WRITE(GMBUS1 + reg_offset, + (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) | + (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | + (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | + GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); + POSTING_READ(GMBUS2+reg_offset); + + while (len) { + if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) + goto timeout; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return 0; + + val = loop = 0; + do { + val |= *buf++ << (8 * loop); + } while (--len && ++loop < 4); + + I915_WRITE(GMBUS3 + reg_offset, val); + POSTING_READ(GMBUS2+reg_offset); + } + } + + if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) + goto timeout; + if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) + return 0; + } + + return num; + +timeout: + DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", + bus->reg0 & 0xff, bus->adapter.name); + /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ + bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); + if (!bus->force_bit) + return -ENOMEM; + + return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num); +} + +static u32 gmbus_func(struct i2c_adapter *adapter) +{ + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + + if (bus->force_bit) + bus->force_bit->algo->functionality(bus->force_bit); + + return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + /* I2C_FUNC_10BIT_ADDR | */ + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL); +} + +static const struct i2c_algorithm gmbus_algorithm = { + .master_xfer = gmbus_xfer, + .functionality = gmbus_func +}; + +/** + * intel_setup_gmbus - instantiate all Intel i2c GMBuses + * @dev: DRM device + */ +int intel_setup_gmbus(struct drm_device *dev) +{ + static const char *names[GMBUS_NUM_PORTS] = { + "disabled", + "ssc", + "vga", + "panel", + "dpc", + "dpb", + "reserved", + "dpd", + }; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, i; + + dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), + GFP_KERNEL); + if (dev_priv->gmbus == NULL) + return -ENOMEM; + + for (i = 0; i < GMBUS_NUM_PORTS; i++) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + + bus->adapter.owner = THIS_MODULE; + bus->adapter.class = I2C_CLASS_DDC; + snprintf(bus->adapter.name, + I2C_NAME_SIZE, + "gmbus %s", + names[i]); + + bus->adapter.dev.parent = &dev->pdev->dev; + bus->adapter.algo_data = dev_priv; + + bus->adapter.algo = &gmbus_algorithm; + ret = i2c_add_adapter(&bus->adapter); + if (ret) + goto err; + + /* By default use a conservative clock rate */ + bus->reg0 = i | GMBUS_RATE_100KHZ; + + /* XXX force bit banging until GMBUS is fully debugged */ + bus->force_bit = intel_gpio_create(dev_priv, i); + } + + intel_i2c_reset(dev_priv->dev); + + return 0; + +err: +
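/* Unwind only the adapters registered before the failure */ +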
while (i--) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + i2c_del_adapter(&bus->adapter); + } + kfree(dev_priv->gmbus); + dev_priv->gmbus = NULL; + return ret; +} + +void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) +{ + struct intel_gmbus *bus = to_intel_gmbus(adapter); + + /* speed: + * 0x0 = 100 KHz + * 0x1 = 50 KHz + * 0x2 = 400 KHz + * 0x3 = 1000 KHz + */ + bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8); +} + +void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) +{ + struct intel_gmbus *bus = to_intel_gmbus(adapter); + + if (force_bit) { + if (bus->force_bit == NULL) { + struct drm_i915_private *dev_priv = adapter->algo_data; + bus->force_bit = intel_gpio_create(dev_priv, + bus->reg0 & 0xff); + } + } else { + if (bus->force_bit) { + i2c_del_adapter(bus->force_bit); + kfree(bus->force_bit); + bus->force_bit = NULL; + } + } +} + +void intel_teardown_gmbus(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + if (dev_priv->gmbus == NULL) return; - chan = container_of(adapter, - struct intel_i2c_chan, - adapter); - i2c_del_adapter(&chan->adapter); - kfree(chan); + for (i = 0; i < GMBUS_NUM_PORTS; i++) { + struct intel_gmbus *bus = &dev_priv->gmbus[i]; + if (bus->force_bit) { + i2c_del_adapter(bus->force_bit); + kfree(bus->force_bit); + } + i2c_del_adapter(&bus->adapter); + } + + kfree(dev_priv->gmbus); + dev_priv->gmbus = NULL; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6ec39a86ed06..f1a649990ea9 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -43,102 +43,76 @@ /* Private structure for the integrated LVDS support */ struct intel_lvds { struct intel_encoder base; + + struct edid *edid; + int fitting_mode; u32 pfit_control; u32 pfit_pgm_ratios; + bool pfit_dirty; + + struct drm_display_mode *fixed_mode; }; -static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder) +static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base); + return container_of(encoder, struct intel_lvds, base.base); } -/** - * Sets the backlight level. - * - * \param level backlight level, from 0 to intel_lvds_get_max_backlight(). - */ -static void intel_lvds_set_backlight(struct drm_device *dev, int level) +static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) { - struct drm_i915_private *dev_priv = dev->dev_private; - u32 blc_pwm_ctl, reg; - - if (HAS_PCH_SPLIT(dev)) - reg = BLC_PWM_CPU_CTL; - else - reg = BLC_PWM_CTL; - - blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(reg, (blc_pwm_ctl | - (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); -} - -/** - * Returns the maximum level of the backlight duty cycle field. - */ -static u32 intel_lvds_get_max_backlight(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg; - - if (HAS_PCH_SPLIT(dev)) - reg = BLC_PWM_PCH_CTL2; - else - reg = BLC_PWM_CTL; - - return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >> - BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; + return container_of(intel_attached_encoder(connector), + struct intel_lvds, base); } /** * Sets the power state for the panel.
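+ * On the power-off path this now also waits for the panel to quiesce and clears the panel fitter (when one was in use) before the LVDS port is disabled.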
*/ -static void intel_lvds_set_power(struct drm_device *dev, bool on) +static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) { + struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 ctl_reg, status_reg, lvds_reg; + u32 ctl_reg, lvds_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; - status_reg = PCH_PP_STATUS; lvds_reg = PCH_LVDS; } else { ctl_reg = PP_CONTROL; - status_reg = PP_STATUS; lvds_reg = LVDS; } if (on) { I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); - POSTING_READ(lvds_reg); - - I915_WRITE(ctl_reg, I915_READ(ctl_reg) | - POWER_TARGET_ON); - if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0)) - DRM_ERROR("timed out waiting to enable LVDS pipe"); - - intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); + intel_panel_set_backlight(dev, dev_priv->backlight_level); } else { - intel_lvds_set_backlight(dev, 0); + dev_priv->backlight_level = intel_panel_get_backlight(dev); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) & - ~POWER_TARGET_ON); - if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0)) - DRM_ERROR("timed out waiting for LVDS pipe to turn off"); + intel_panel_set_backlight(dev, 0); + I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); + + if (intel_lvds->pfit_control) { + if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) + DRM_ERROR("timed out waiting for panel to power off\n"); + I915_WRITE(PFIT_CONTROL, 0); + intel_lvds->pfit_control = 0; + intel_lvds->pfit_dirty = false; + } I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); - POSTING_READ(lvds_reg); } + POSTING_READ(lvds_reg); } static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) { - struct drm_device *dev = encoder->dev; + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); if (mode == DRM_MODE_DPMS_ON) - intel_lvds_set_power(dev, true); + intel_lvds_set_power(intel_lvds, true); else - intel_lvds_set_power(dev, false); + intel_lvds_set_power(intel_lvds, false); /* XXX: We never power down the LVDS pairs. */ } @@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) static int intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); + struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; - if (fixed_mode) { - if (mode->hdisplay > fixed_mode->hdisplay) - return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) - return MODE_PANEL; - } + if (mode->hdisplay > fixed_mode->hdisplay) + return MODE_PANEL; + if (mode->vdisplay > fixed_mode->vdisplay) + return MODE_PANEL; return MODE_OK; } @@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); struct drm_encoder *tmp_encoder; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; /* Should never happen!! 
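+ * (pre-gen4 hardware can only drive LVDS from pipe B)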
*/ - if (!IS_I965G(dev) && intel_crtc->pipe == 0) { + if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) { DRM_ERROR("Can't support LVDS on pipe A\n"); return false; } @@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return false; } } - /* If we don't have a panel mode, there is nothing we can do */ - if (dev_priv->panel_fixed_mode == NULL) - return true; /* * We have timings from the BIOS for the panel, put them in @@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * with the panel scaling set up to source from the H/VDisplay * of the original mode. */ - intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); + intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); if (HAS_PCH_SPLIT(dev)) { intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, @@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, } /* Make sure pre-965s set dither correctly */ - if (!IS_I965G(dev)) { - if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) + if (INTEL_INFO(dev)->gen < 4) { + if (dev_priv->lvds_dither) pfit_control |= PANEL_8TO6_DITHER_ENABLE; } @@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, goto out; /* 965+ wants fuzzy fitting */ - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | PFIT_FILTER_FUZZY); @@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, case DRM_MODE_SCALE_ASPECT: /* Scale but preserve the aspect ratio */ - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; @@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * Fortunately this is all done for us in hw. */ pfit_control |= PFIT_ENABLE; - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) pfit_control |= PFIT_SCALING_AUTO; else pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | @@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, } out: - intel_lvds->pfit_control = pfit_control; - intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; + if (pfit_control != intel_lvds->pfit_control || + pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { + intel_lvds->pfit_control = pfit_control; + intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; + intel_lvds->pfit_dirty = true; + } dev_priv->lvds_border_bits = border; /* @@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg; + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - if (HAS_PCH_SPLIT(dev)) - reg = BLC_PWM_CPU_CTL; - else - reg = BLC_PWM_CTL; + dev_priv->backlight_level = intel_panel_get_backlight(dev); - dev_priv->saveBLC_PWM_CTL = I915_READ(reg); - dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & - BACKLIGHT_DUTY_CYCLE_MASK); + /* We try to do the minimum that is necessary in order to unlock + * the registers for mode setting. + * + * On Ironlake, this is quite simple as we just set the unlock key + * and ignore all subtleties. (This may cause some issues...) + * + * Prior to Ironlake, we must disable the pipe if we want to adjust + * the panel fitter. However at all other times we can just reset + * the registers regardless. 
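+ * + * (Note that the pfit_dirty case below drops POWER_TARGET_ON while writing the unlock key, so the pipe is only powered down when the fitter actually needs reprogramming.)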
+ */ - intel_lvds_set_power(dev, false); + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(PCH_PP_CONTROL, + I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); + } else if (intel_lvds->pfit_dirty) { + I915_WRITE(PP_CONTROL, + (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS) + & ~POWER_TARGET_ON); + } else { + I915_WRITE(PP_CONTROL, + I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); + } } -static void intel_lvds_commit( struct drm_encoder *encoder) +static void intel_lvds_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - if (dev_priv->backlight_duty_cycle == 0) - dev_priv->backlight_duty_cycle = - intel_lvds_get_max_backlight(dev); + if (dev_priv->backlight_level == 0) + dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - intel_lvds_set_power(dev, true); + /* Undo any unlocking done in prepare to prevent accidental + * adjustment of the registers. + */ + if (HAS_PCH_SPLIT(dev)) { + u32 val = I915_READ(PCH_PP_CONTROL); + if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) + I915_WRITE(PCH_PP_CONTROL, val & 0x3); + } else { + u32 val = I915_READ(PP_CONTROL); + if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) + I915_WRITE(PP_CONTROL, val & 0x3); + } + + /* Always do a full power on as we do not know what state + * we were left in. + */ + intel_lvds_set_power(intel_lvds, true); } static void intel_lvds_mode_set(struct drm_encoder *encoder, @@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); /* * The LVDS pin pair will already have been turned on in the @@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, if (HAS_PCH_SPLIT(dev)) return; + if (!intel_lvds->pfit_dirty) + return; + /* * Enable automatic panel scaling so that non-native modes fill the * screen. Should be enabled before the pipe is enabled, according to * register description and PRM. 
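intel_lvds_prepare() and intel_lvds_commit() now bracket the modeset with an unlock/re-lock of the panel power sequencer: prepare ORs the unlock key into PP_CONTROL (additionally dropping POWER_TARGET_ON only when the panel fitter must change), and commit masks the register back to its low control bits if the key is still in place. The shape of that protocol on a plain variable; the key encoding and bit positions below are assumptions for illustration, not taken from this patch:

#include <stdio.h>

#define PANEL_UNLOCK_REGS (0xabcdu << 16)  /* assumed key encoding */
#define POWER_TARGET_ON   (1u << 0)        /* assumed bit position */

int main(void)
{
        unsigned int pp_control = POWER_TARGET_ON;

        pp_control |= PANEL_UNLOCK_REGS;           /* prepare: unlock */
        /* ... panel fitter / timing registers written here ... */
        if ((pp_control & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
                pp_control &= 0x3;                 /* commit: re-lock */

        printf("PP_CONTROL = %#x\n", pp_control);  /* 0x1 */
        return 0;
}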
*/ + DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", + intel_lvds->pfit_control, + intel_lvds->pfit_pgm_ratios); + if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) + DRM_ERROR("timed out waiting for panel to power off\n"); + I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); + intel_lvds->pfit_dirty = false; } /** @@ -465,38 +477,22 @@ intel_lvds_detect(struct drm_connector *connector, bool force) */ static int intel_lvds_get_modes(struct drm_connector *connector) { + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); - struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; + struct drm_display_mode *mode; - if (dev_priv->lvds_edid_good) { - ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - - if (ret) - return ret; + if (intel_lvds->edid) { + drm_mode_connector_update_edid_property(connector, + intel_lvds->edid); + return drm_add_edid_modes(connector, intel_lvds->edid); } - /* Didn't get an EDID, so - * Set wide sync ranges so we get all modes - * handed to valid_mode for checking - */ - connector->display_info.min_vfreq = 0; - connector->display_info.max_vfreq = 200; - connector->display_info.min_hfreq = 0; - connector->display_info.max_hfreq = 200; + mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); + if (mode == 0) + return 0; - if (dev_priv->panel_fixed_mode != NULL) { - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); - drm_mode_probed_add(connector, mode); - - return 1; - } - - return 0; + drm_mode_probed_add(connector, mode); + return 1; } static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) @@ -587,18 +583,17 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { + struct intel_lvds *intel_lvds = intel_attached_lvds(connector); struct drm_device *dev = connector->dev; - if (property == dev->mode_config.scaling_mode_property && - connector->encoder) { - struct drm_crtc *crtc = connector->encoder->crtc; - struct drm_encoder *encoder = connector->encoder; - struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder); + if (property == dev->mode_config.scaling_mode_property) { + struct drm_crtc *crtc = intel_lvds->base.base.crtc; if (value == DRM_MODE_SCALE_NONE) { DRM_DEBUG_KMS("no scaling not supported\n"); - return 0; + return -EINVAL; } + if (intel_lvds->fitting_mode == value) { /* the LVDS scaling property is not changed */ return 0; @@ -628,7 +623,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { @@ -726,16 +721,14 @@ static const struct dmi_system_id intel_no_lvds[] = { * Find the reduced downclock for LVDS in EDID. 
*/ static void intel_find_lvds_downclock(struct drm_device *dev, - struct drm_connector *connector) + struct drm_display_mode *fixed_mode, + struct drm_connector *connector) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_display_mode *scan, *panel_fixed_mode; + struct drm_display_mode *scan; int temp_downclock; - panel_fixed_mode = dev_priv->panel_fixed_mode; - temp_downclock = panel_fixed_mode->clock; - - mutex_lock(&dev->mode_config.mutex); + temp_downclock = fixed_mode->clock; list_for_each_entry(scan, &connector->probed_modes, head) { /* * If one mode has the same resolution with the fixed_panel @@ -744,14 +737,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev, * case we can set the different FPx0/1 to dynamically select * between low and high frequency. */ - if (scan->hdisplay == panel_fixed_mode->hdisplay && - scan->hsync_start == panel_fixed_mode->hsync_start && - scan->hsync_end == panel_fixed_mode->hsync_end && - scan->htotal == panel_fixed_mode->htotal && - scan->vdisplay == panel_fixed_mode->vdisplay && - scan->vsync_start == panel_fixed_mode->vsync_start && - scan->vsync_end == panel_fixed_mode->vsync_end && - scan->vtotal == panel_fixed_mode->vtotal) { + if (scan->hdisplay == fixed_mode->hdisplay && + scan->hsync_start == fixed_mode->hsync_start && + scan->hsync_end == fixed_mode->hsync_end && + scan->htotal == fixed_mode->htotal && + scan->vdisplay == fixed_mode->vdisplay && + scan->vsync_start == fixed_mode->vsync_start && + scan->vsync_end == fixed_mode->vsync_end && + scan->vtotal == fixed_mode->vtotal) { if (scan->clock < temp_downclock) { /* * The downclock is already found. But we @@ -761,17 +754,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev, } } } - mutex_unlock(&dev->mode_config.mutex); - if (temp_downclock < panel_fixed_mode->clock && - i915_lvds_downclock) { + if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) { /* We found the downclock for LVDS. */ dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = temp_downclock; DRM_DEBUG_KMS("LVDS downclock is found in EDID. " - "Normal clock %dKhz, downclock %dKhz\n", - panel_fixed_mode->clock, temp_downclock); + "Normal clock %dKhz, downclock %dKhz\n", + fixed_mode->clock, temp_downclock); } - return; } /* @@ -780,38 +770,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev, * If it is present, return 1. * If it is not present, return false. * If no child dev is parsed from VBT, it assumes that the LVDS is present. - * Note: The addin_offset should also be checked for LVDS panel. - * Only when it is non-zero, it is assumed that it is present. */ -static int lvds_is_present_in_vbt(struct drm_device *dev) +static bool lvds_is_present_in_vbt(struct drm_device *dev, + u8 *i2c_pin) { struct drm_i915_private *dev_priv = dev->dev_private; - struct child_device_config *p_child; - int i, ret; + int i; if (!dev_priv->child_dev_num) - return 1; + return true; - ret = 0; for (i = 0; i < dev_priv->child_dev_num; i++) { - p_child = dev_priv->child_dev + i; - /* - * If the device type is not LFP, continue. - * If the device type is 0x22, it is also regarded as LFP. + struct child_device_config *child = dev_priv->child_dev + i; + + /* If the device type is not LFP, continue. + * We have to check both the new identifiers as well as the + * old for compatibility with some BIOSes. 
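intel_find_lvds_downclock() keeps the lowest pixel clock among probed modes whose timings otherwise match the fixed panel mode field for field; such a pair lets the driver select between the FPx0/1 dividers without a full modeset. The scan in standalone form, with made-up timings:

#include <stdio.h>

struct timings { int hd, hss, hse, ht, vd, vss, vse, vt, clock; };

static int find_downclock(const struct timings *fixed,
                          const struct timings *scan, int n)
{
        int best = fixed->clock;

        for (int i = 0; i < n; i++) {
                const struct timings *s = &scan[i];

                if (s->hd == fixed->hd && s->hss == fixed->hss &&
                    s->hse == fixed->hse && s->ht == fixed->ht &&
                    s->vd == fixed->vd && s->vss == fixed->vss &&
                    s->vse == fixed->vse && s->vt == fixed->vt &&
                    s->clock < best)
                        best = s->clock;
        }
        return best;  /* below fixed->clock means a downclock exists */
}

int main(void)
{
        struct timings fixed = { 1280, 1328, 1360, 1440,
                                 800, 803, 809, 823, 71000 };
        struct timings probed[] = { fixed, fixed };

        probed[1].clock = 48000;  /* same timings, lower dotclock */
        printf("downclock: %d kHz\n", find_downclock(&fixed, probed, 2));
        return 0;
}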
*/ - if (p_child->device_type != DEVICE_TYPE_INT_LFP && - p_child->device_type != DEVICE_TYPE_LFP) + if (child->device_type != DEVICE_TYPE_INT_LFP && + child->device_type != DEVICE_TYPE_LFP) continue; - /* The addin_offset should be checked. Only when it is - * non-zero, it is regarded as present. + if (child->i2c_pin) + *i2c_pin = child->i2c_pin; + + /* However, we cannot trust the BIOS writers to populate + * the VBT correctly. Since LVDS requires additional + * information from AIM blocks, a non-zero addin offset is + * a good indicator that the LVDS is actually present. */ - if (p_child->addin_offset) { - ret = 1; - break; - } + if (child->addin_offset) + return true; + + /* But even then some BIOS writers perform some black magic + * and instantiate the device without reference to any + * additional data. Trust that if the VBT was written into + * the OpRegion then they have validated the LVDS's existence. + */ + if (dev_priv->opregion.vbt) + return true; } - return ret; + + return false; +} + +static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u8 buf = 0; + struct i2c_msg msgs[] = { + { + .addr = 0xA0, + .flags = 0, + .len = 1, + .buf = &buf, + }, + }; + struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter; + /* XXX this only appears to work when using GMBUS */ + if (intel_gmbus_is_forced_bit(i2c)) + return true; + return i2c_transfer(i2c, msgs, 1) == 1; } /** @@ -832,13 +851,15 @@ void intel_lvds_init(struct drm_device *dev) struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_crtc *crtc; u32 lvds; - int pipe, gpio = GPIOC; + int pipe; + u8 pin; /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) return; - if (!lvds_is_present_in_vbt(dev)) { + pin = GMBUS_PORT_PANEL; + if (!lvds_is_present_in_vbt(dev, &pin)) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); return; } @@ -846,11 +867,15 @@ void intel_lvds_init(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) return; - if (dev_priv->edp_support) { + if (dev_priv->edp.support) { DRM_DEBUG_KMS("disable LVDS for eDP support\n"); return; } - gpio = PCH_GPIOC; + } + + if (!intel_lvds_ddc_probe(dev, pin)) { + DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); + return; } intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); @@ -864,16 +889,20 @@ void intel_lvds_init(struct drm_device *dev) return; } + if (!HAS_PCH_SPLIT(dev)) { + intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); + } + intel_encoder = &intel_lvds->base; - encoder = &intel_encoder->enc; + encoder = &intel_encoder->base; connector = &intel_connector->base; drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); + intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_LVDS; intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); @@ -904,43 +933,41 @@ void intel_lvds_init(struct drm_device *dev) * if closed, act like it's not there for now */ - /* Set up the DDC bus. 
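intel_lvds_ddc_probe() above issues a single one-byte read at the EDID address to confirm a panel actually answers before the connector is registered. A userspace analogue over Linux i2c-dev (the bus path is hypothetical, and i2c-dev wants the 7-bit address 0x50, whose 8-bit form is the 0xA0 seen above):

#include <fcntl.h>
#include <linux/i2c-dev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        char buf;
        int fd = open("/dev/i2c-1", O_RDWR);  /* hypothetical bus */

        if (fd < 0)
                return 1;
        if (ioctl(fd, I2C_SLAVE, 0x50) < 0) { /* EDID address */
                close(fd);
                return 1;
        }
        if (read(fd, &buf, 1) == 1)
                printf("DDC device responded\n");
        close(fd);
        return 0;
}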
*/ - intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); - if (!intel_encoder->ddc_bus) { - dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " - "failed.\n"); - goto failed; - } - /* * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - dev_priv->lvds_edid_good = true; + intel_lvds->edid = drm_get_edid(connector, + &dev_priv->gmbus[pin].adapter); - if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus)) - dev_priv->lvds_edid_good = false; + if (!intel_lvds->edid) { + /* Didn't get an EDID, so + * Set wide sync ranges so we get all modes + * handed to valid_mode for checking + */ + connector->display_info.min_vfreq = 0; + connector->display_info.max_vfreq = 200; + connector->display_info.min_hfreq = 0; + connector->display_info.max_hfreq = 200; + } list_for_each_entry(scan, &connector->probed_modes, head) { - mutex_lock(&dev->mode_config.mutex); if (scan->type & DRM_MODE_TYPE_PREFERRED) { - dev_priv->panel_fixed_mode = + intel_lvds->fixed_mode = drm_mode_duplicate(dev, scan); - mutex_unlock(&dev->mode_config.mutex); - intel_find_lvds_downclock(dev, connector); + intel_find_lvds_downclock(dev, + intel_lvds->fixed_mode, + connector); goto out; } - mutex_unlock(&dev->mode_config.mutex); } /* Failed to get EDID, what about VBT? */ if (dev_priv->lfp_lvds_vbt_mode) { - mutex_lock(&dev->mode_config.mutex); - dev_priv->panel_fixed_mode = + intel_lvds->fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - mutex_unlock(&dev->mode_config.mutex); - if (dev_priv->panel_fixed_mode) { - dev_priv->panel_fixed_mode->type |= + if (intel_lvds->fixed_mode) { + intel_lvds->fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; } @@ -958,19 +985,19 @@ void intel_lvds_init(struct drm_device *dev) lvds = I915_READ(LVDS); pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; - crtc = intel_get_crtc_from_pipe(dev, pipe); + crtc = intel_get_crtc_for_pipe(dev, pipe); if (crtc && (lvds & LVDS_PORT_EN)) { - dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc); - if (dev_priv->panel_fixed_mode) { - dev_priv->panel_fixed_mode->type |= + intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); + if (intel_lvds->fixed_mode) { + intel_lvds->fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; goto out; } } /* If we still don't have a mode after all that, give up. 
*/ - if (!dev_priv->panel_fixed_mode) + if (!intel_lvds->fixed_mode) goto failed; out: @@ -997,8 +1024,6 @@ out: failed: DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); - if (intel_encoder->ddc_bus) - intel_i2c_destroy(intel_encoder->ddc_bus); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); kfree(intel_lvds); diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 4b1fd3d9c73c..f70b7cf32bff 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2007 Dave Airlie - * Copyright (c) 2007 Intel Corporation + * Copyright (c) 2007, 2010 Intel Corporation * Jesse Barnes * * Permission is hereby granted, free of charge, to any person obtaining a @@ -34,11 +34,11 @@ * intel_ddc_probe * */ -bool intel_ddc_probe(struct intel_encoder *intel_encoder) +bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus) { + struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private; u8 out_buf[] = { 0x0, 0x0}; u8 buf[2]; - int ret; struct i2c_msg msgs[] = { { .addr = 0x50, @@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder) } }; - intel_i2c_quirk_set(intel_encoder->enc.dev, true); - ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2); - intel_i2c_quirk_set(intel_encoder->enc.dev, false); - if (ret == 2) - return true; - - return false; + return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2; } /** @@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector, struct edid *edid; int ret = 0; - intel_i2c_quirk_set(connector->dev, true); edid = drm_get_edid(connector, adapter); - intel_i2c_quirk_set(connector->dev, false); if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c similarity index 81% rename from drivers/gpu/drm/i915/i915_opregion.c rename to drivers/gpu/drm/i915/intel_opregion.c index ea5d3fea4b61..917c7dc3cd6b 100644 --- a/drivers/gpu/drm/i915/i915_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -31,17 +31,16 @@ #include "drmP.h" #include "i915_drm.h" #include "i915_drv.h" +#include "intel_drv.h" #define PCI_ASLE 0xe4 -#define PCI_LBPC 0xf4 #define PCI_ASLS 0xfc -#define OPREGION_SZ (8*1024) #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 #define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_ASLE_OFFSET 0x300 -#define OPREGION_VBT_OFFSET 0x1000 +#define OPREGION_VBT_OFFSET 0x400 #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_ACPI (1<<0) @@ -143,40 +142,22 @@ struct opregion_asle { #define ACPI_DIGITAL_OUTPUT (3<<8) #define ACPI_LVDS_OUTPUT (4<<8) +#ifdef CONFIG_ACPI static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; - u32 blc_pwm_ctl, blc_pwm_ctl2; - u32 max_backlight, level, shift; + u32 max; if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAILED; bclp &= ASLE_BCLP_MSK; - if (bclp < 0 || bclp > 255) + if (bclp > 255) return ASLE_BACKLIGHT_FAILED; - blc_pwm_ctl = I915_READ(BLC_PWM_CTL); - blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); - - if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) - pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); - else { - if (IS_PINEVIEW(dev)) { - blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); - max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 
- BACKLIGHT_MODULATION_FREQ_SHIFT; - shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1; - } else { - blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; - max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> - BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; - shift = BACKLIGHT_DUTY_CYCLE_SHIFT; - } - level = (bclp * max_backlight) / 255; - I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift)); - } + max = intel_panel_get_max_backlight(dev); + intel_panel_set_backlight(dev, bclp * max / 255); asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; return 0; @@ -211,7 +192,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) return 0; } -void opregion_asle_intr(struct drm_device *dev) +void intel_opregion_asle_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; @@ -243,37 +224,8 @@ void opregion_asle_intr(struct drm_device *dev) asle->aslc = asle_stat; } -static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct opregion_asle *asle = dev_priv->opregion.asle; - u32 cpu_pwm_ctl, pch_pwm_ctl2; - u32 max_backlight, level; - - if (!(bclp & ASLE_BCLP_VALID)) - return ASLE_BACKLIGHT_FAILED; - - bclp &= ASLE_BCLP_MSK; - if (bclp < 0 || bclp > 255) - return ASLE_BACKLIGHT_FAILED; - - cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL); - pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); - /* get the max PWM frequency */ - max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK; - /* calculate the expected PMW frequency */ - level = (bclp * max_backlight) / 255; - /* reserve the high 16 bits */ - cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK); - /* write the updated PWM frequency */ - I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level); - - asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; - - return 0; -} - -void ironlake_opregion_gse_intr(struct drm_device *dev) +/* Only present on Ironlake+ */ +void intel_opregion_gse_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; @@ -296,7 +248,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev) } if (asle_req & ASLE_SET_BACKLIGHT) - asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp); + asle_stat |= asle_set_backlight(dev, asle->bclp); if (asle_req & ASLE_SET_PFIT) { DRM_DEBUG_DRIVER("Pfit is not supported\n"); @@ -315,7 +267,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev) #define ASLE_PFIT_EN (1<<2) #define ASLE_PFMB_EN (1<<3) -void opregion_enable_asle(struct drm_device *dev) +void intel_opregion_enable_asle(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; @@ -464,7 +416,58 @@ blind_set: goto end; } -int intel_opregion_init(struct drm_device *dev, int resume) +void intel_opregion_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + + if (!opregion->header) + return; + + if (opregion->acpi) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_didl_outputs(dev); + + /* Notify BIOS we are ready to handle ACPI video ext notifs. + * Right now, all the events are handled by the ACPI video module. + * We don't actually need to do anything with them. 
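With the duplicated Ironlake path gone, asle_set_backlight() keeps only the arithmetic and delegates the PWM programming to intel_panel_set_backlight(): map the BIOS-requested bclp (0..255) onto the panel's maximum duty cycle, then report the result back as a percentage in cblv. That arithmetic in isolation, with a hypothetical PWM maximum:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int max = 0x1fff;  /* hypothetical PWM maximum */

        for (unsigned int bclp = 0; bclp <= 255; bclp += 51) {
                unsigned int level = bclp * max / 255;   /* duty cycle     */
                unsigned int cblv = bclp * 0x64 / 0xff;  /* percent 0..100 */

                assert(level <= max && cblv <= 100);
                printf("bclp=%3u -> level=%5u cblv=%3u%%\n",
                       bclp, level, cblv);
        }
        return 0;
}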
*/ + opregion->acpi->csts = 0; + opregion->acpi->drdy = 1; + + system_opregion = opregion; + register_acpi_notifier(&intel_opregion_notifier); + } + + if (opregion->asle) + intel_opregion_enable_asle(dev); +} + +void intel_opregion_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + + if (!opregion->header) + return; + + if (opregion->acpi) { + opregion->acpi->drdy = 0; + + system_opregion = NULL; + unregister_acpi_notifier(&intel_opregion_notifier); + } + + /* just clear all opregion memory pointers now */ + iounmap(opregion->header); + opregion->header = NULL; + opregion->acpi = NULL; + opregion->swsci = NULL; + opregion->asle = NULL; + opregion->vbt = NULL; +} +#endif + +int intel_opregion_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; @@ -479,29 +482,23 @@ int intel_opregion_init(struct drm_device *dev, int resume) return -ENOTSUPP; } - base = ioremap(asls, OPREGION_SZ); + base = ioremap(asls, OPREGION_SIZE); if (!base) return -ENOMEM; - opregion->header = base; - if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { + if (memcmp(base, OPREGION_SIGNATURE, 16)) { DRM_DEBUG_DRIVER("opregion signature mismatch\n"); err = -EINVAL; goto err_out; } + opregion->header = base; + opregion->vbt = base + OPREGION_VBT_OFFSET; mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; - if (drm_core_check_feature(dev, DRIVER_MODESET)) - intel_didl_outputs(dev); - } else { - DRM_DEBUG_DRIVER("Public ACPI methods not supported\n"); - err = -ENOTSUPP; - goto err_out; } - opregion->enabled = 1; if (mboxes & MBOX_SWSCI) { DRM_DEBUG_DRIVER("SWSCI supported\n"); @@ -510,53 +507,11 @@ int intel_opregion_init(struct drm_device *dev, int resume) if (mboxes & MBOX_ASLE) { DRM_DEBUG_DRIVER("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; - opregion_enable_asle(dev); } - if (!resume) - acpi_video_register(); - - - /* Notify BIOS we are ready to handle ACPI video ext notifs. - * Right now, all the events are handled by the ACPI video module. - * We don't actually need to do anything with them. 
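intel_opregion_setup() now reduces to mapping the region, checking the 16-byte signature, and deriving the mailbox pointers from fixed offsets (note the VBT offset corrected from 0x1000 to 0x400). A sketch of that probe over a plain buffer; the minimum-length guard is added here for safety, and the driver additionally gates the acpi/swsci/asle pointers on the header's mboxes bits:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct opregion_view {
        void *acpi, *swsci, *asle, *vbt;
};

static int opregion_probe(uint8_t *base, size_t len, struct opregion_view *v)
{
        if (len < 0x500 || memcmp(base, OPREGION_SIGNATURE, 16))
                return -1;               /* too small or bad signature */
        v->acpi  = base + 0x100;         /* OPREGION_ACPI_OFFSET  */
        v->swsci = base + 0x200;         /* OPREGION_SWSCI_OFFSET */
        v->asle  = base + 0x300;         /* OPREGION_ASLE_OFFSET  */
        v->vbt   = base + 0x400;         /* OPREGION_VBT_OFFSET   */
        return 0;
}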
*/ - opregion->acpi->csts = 0; - opregion->acpi->drdy = 1; - - system_opregion = opregion; - register_acpi_notifier(&intel_opregion_notifier); - return 0; err_out: iounmap(opregion->header); - opregion->header = NULL; - acpi_video_register(); return err; } - -void intel_opregion_free(struct drm_device *dev, int suspend) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_opregion *opregion = &dev_priv->opregion; - - if (!opregion->enabled) - return; - - if (!suspend) - acpi_video_unregister(); - - opregion->acpi->drdy = 0; - - system_opregion = NULL; - unregister_acpi_notifier(&intel_opregion_notifier); - - /* just clear all opregion memory pointers now */ - iounmap(opregion->header); - opregion->header = NULL; - opregion->acpi = NULL; - opregion->swsci = NULL; - opregion->asle = NULL; - - opregion->enabled = 0; -} diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 3264bbd47e65..afb96d25219a 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -170,56 +170,143 @@ struct overlay_registers { u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES]; }; -/* overlay flip addr flag */ -#define OFC_UPDATE 0x1 +struct intel_overlay { + struct drm_device *dev; + struct intel_crtc *crtc; + struct drm_i915_gem_object *vid_bo; + struct drm_i915_gem_object *old_vid_bo; + int active; + int pfit_active; + u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ + u32 color_key; + u32 brightness, contrast, saturation; + u32 old_xscale, old_yscale; + /* register access */ + u32 flip_addr; + struct drm_i915_gem_object *reg_bo; + /* flip handling */ + uint32_t last_flip_req; + void (*flip_tail)(struct intel_overlay *); +}; -#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) -#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) - - -static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) +static struct overlay_registers * +intel_overlay_map_regs(struct intel_overlay *overlay) { drm_i915_private_t *dev_priv = overlay->dev->dev_private; struct overlay_registers *regs; - /* no recursive mappings */ - BUG_ON(overlay->virt_addr); - - if (OVERLAY_NONPHYSICAL(overlay->dev)) { - regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - overlay->reg_bo->gtt_offset); - - if (!regs) { - DRM_ERROR("failed to map overlay regs in GTT\n"); - return NULL; - } - } else + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) regs = overlay->reg_bo->phys_obj->handle->vaddr; + else + regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, + overlay->reg_bo->gtt_offset); - return overlay->virt_addr = regs; + return regs; } -static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) +static void intel_overlay_unmap_regs(struct intel_overlay *overlay, + struct overlay_registers *regs) { - if (OVERLAY_NONPHYSICAL(overlay->dev)) - io_mapping_unmap_atomic(overlay->virt_addr); + if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + io_mapping_unmap(regs); +} - overlay->virt_addr = NULL; +static int intel_overlay_do_wait_request(struct intel_overlay *overlay, + struct drm_i915_gem_request *request, + bool interruptible, + void (*tail)(struct intel_overlay *)) +{ + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; - return; + BUG_ON(overlay->last_flip_req); + overlay->last_flip_req = + i915_add_request(dev, NULL, request, &dev_priv->render_ring); + if (overlay->last_flip_req == 0) + return -ENOMEM; + + 
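intel_overlay_do_wait_request() replaces the old hw_wedged state machine: each operation queues one request together with an optional flip_tail callback, and the tail runs once the request is known to have completed, either on the synchronous path or later from the recover-from-interrupt path. A much-simplified model of that bookkeeping, with the request queue and ring wait compressed into stubs:

#include <stdio.h>

struct overlay_state {
        unsigned int last_flip_req;
        void (*flip_tail)(struct overlay_state *);
};

static void release_old_vid(struct overlay_state *o)
{
        printf("releasing old video buffer\n");  /* deferred cleanup */
}

static int do_wait_request(struct overlay_state *o,
                           void (*tail)(struct overlay_state *))
{
        o->last_flip_req = 1;    /* i915_add_request() stand-in      */
        o->flip_tail = tail;
        /* i915_do_wait_request() stand-in: pretend it completed ... */
        if (o->flip_tail)
                o->flip_tail(o); /* run the deferred work once       */
        o->flip_tail = NULL;
        o->last_flip_req = 0;
        return 0;
}

int main(void)
{
        struct overlay_state o = { 0 };

        return do_wait_request(&o, release_old_vid);
}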
overlay->flip_tail = tail; + ret = i915_do_wait_request(dev, + overlay->last_flip_req, true, + &dev_priv->render_ring); + if (ret) + return ret; + + overlay->last_flip_req = 0; + return 0; +} + +/* Workaround for i830 bug where pipe a must be enable to change control regs */ +static int +i830_activate_pipe_a(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *crtc; + struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_display_mode vesa_640x480 = { + DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, + 752, 800, 0, 480, 489, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) + }, *mode; + + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]); + if (crtc->dpms_mode == DRM_MODE_DPMS_ON) + return 0; + + /* most i8xx have pipe a forced on, so don't trust dpms mode */ + if (I915_READ(PIPEACONF) & PIPECONF_ENABLE) + return 0; + + crtc_funcs = crtc->base.helper_private; + if (crtc_funcs->dpms == NULL) + return 0; + + DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); + + mode = drm_mode_duplicate(dev, &vesa_640x480); + drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); + if(!drm_crtc_helper_set_mode(&crtc->base, mode, + crtc->base.x, crtc->base.y, + crtc->base.fb)) + return 0; + + crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON); + return 1; +} + +static void +i830_deactivate_pipe_a(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); } /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; + struct drm_i915_gem_request *request; + int pipe_a_quirk = 0; int ret; - drm_i915_private_t *dev_priv = dev->dev_private; BUG_ON(overlay->active); - overlay->active = 1; - overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; + + if (IS_I830(dev)) { + pipe_a_quirk = i830_activate_pipe_a(dev); + if (pipe_a_quirk < 0) + return pipe_a_quirk; + } + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) { + ret = -ENOMEM; + goto out; + } BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); @@ -228,32 +315,30 @@ static int intel_overlay_on(struct intel_overlay *overlay) OUT_RING(MI_NOOP); ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; + ret = intel_overlay_do_wait_request(overlay, request, true, NULL); +out: + if (pipe_a_quirk) + i830_deactivate_pipe_a(dev); - ret = i915_do_wait_request(dev, - overlay->last_flip_req, 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - - overlay->hw_wedged = 0; - overlay->last_flip_req = 0; - return 0; + return ret; } /* overlay needs to be enabled in OCMD reg */ -static void intel_overlay_continue(struct intel_overlay *overlay, - bool load_polyphase_filter) +static int intel_overlay_continue(struct intel_overlay *overlay, + bool load_polyphase_filter) { struct drm_device *dev = overlay->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_request *request; u32 flip_addr = overlay->flip_addr; u32 tmp; BUG_ON(!overlay->active); + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + if (load_polyphase_filter) flip_addr |= OFC_UPDATE; @@ -268,111 +353,18 @@ static void intel_overlay_continue(struct intel_overlay *overlay, 
ADVANCE_LP_RING(); overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); -} - -static int intel_overlay_wait_flip(struct intel_overlay *overlay) -{ - struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int ret; - u32 tmp; - - if (overlay->last_flip_req != 0) { - ret = i915_do_wait_request(dev, overlay->last_flip_req, - 1, &dev_priv->render_ring); - if (ret == 0) { - overlay->last_flip_req = 0; - - tmp = I915_READ(ISR); - - if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) - return 0; - } - } - - /* synchronous slowpath */ - overlay->hw_wedged = RELEASE_OLD_VID; - - BEGIN_LP_RING(2); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - - overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, overlay->last_flip_req, - 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - - overlay->hw_wedged = 0; - overlay->last_flip_req = 0; + i915_add_request(dev, NULL, request, &dev_priv->render_ring); return 0; } -/* overlay needs to be disabled in OCMD reg */ -static int intel_overlay_off(struct intel_overlay *overlay) +static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) { - u32 flip_addr = overlay->flip_addr; - struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + struct drm_gem_object *obj = &overlay->old_vid_bo->base; - BUG_ON(!overlay->active); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); - /* According to intel docs the overlay hw may hang (when switching - * off) without loading the filter coeffs. It is however unclear whether - * this applies to the disabling of the overlay or to the switching off - * of the hw. Do it in both cases */ - flip_addr |= OFC_UPDATE; - - /* wait for overlay to go idle */ - overlay->hw_wedged = SWITCH_OFF_STAGE_1; - - BEGIN_LP_RING(4); - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - - overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, overlay->last_flip_req, - 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - - /* turn overlay off */ - overlay->hw_wedged = SWITCH_OFF_STAGE_2; - - BEGIN_LP_RING(4); - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - - overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, overlay->last_flip_req, - 1, &dev_priv->render_ring); - if (ret != 0) - return ret; - - overlay->hw_wedged = 0; - overlay->last_flip_req = 0; - return ret; + overlay->old_vid_bo = NULL; } static void intel_overlay_off_tail(struct intel_overlay *overlay) @@ -392,96 +384,101 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) overlay->active = 0; } -/* recover from an interruption due to a signal - * We have to be careful not to repeat work forever an make forward progess. 
*/ -int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, - int interruptible) +/* overlay needs to be disabled in OCMD reg */ +static int intel_overlay_off(struct intel_overlay *overlay, + bool interruptible) +{ + struct drm_device *dev = overlay->dev; + u32 flip_addr = overlay->flip_addr; + struct drm_i915_gem_request *request; + + BUG_ON(!overlay->active); + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + + /* According to intel docs the overlay hw may hang (when switching + * off) without loading the filter coeffs. It is however unclear whether + * this applies to the disabling of the overlay or to the switching off + * of the hw. Do it in both cases */ + flip_addr |= OFC_UPDATE; + + BEGIN_LP_RING(6); + /* wait for overlay to go idle */ + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + /* turn overlay off */ + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + ADVANCE_LP_RING(); + + return intel_overlay_do_wait_request(overlay, request, interruptible, + intel_overlay_off_tail); +} + +/* recover from an interruption due to a signal + * We have to be careful not to repeat work forever an make forward progess. */ +static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, + bool interruptible) { struct drm_device *dev = overlay->dev; - struct drm_gem_object *obj; drm_i915_private_t *dev_priv = dev->dev_private; - u32 flip_addr; int ret; - if (overlay->hw_wedged == HW_WEDGED) - return -EIO; - - if (overlay->last_flip_req == 0) { - overlay->last_flip_req = - i915_add_request(dev, NULL, 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - } + if (overlay->last_flip_req == 0) + return 0; ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, &dev_priv->render_ring); - if (ret != 0) + interruptible, &dev_priv->render_ring); + if (ret) return ret; - switch (overlay->hw_wedged) { - case RELEASE_OLD_VID: - obj = &overlay->old_vid_bo->base; - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - overlay->old_vid_bo = NULL; - break; - case SWITCH_OFF_STAGE_1: - flip_addr = overlay->flip_addr; - flip_addr |= OFC_UPDATE; + if (overlay->flip_tail) + overlay->flip_tail(overlay); - overlay->hw_wedged = SWITCH_OFF_STAGE_2; - - BEGIN_LP_RING(4); - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); - - overlay->last_flip_req = i915_add_request(dev, NULL, - 0, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - - ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, &dev_priv->render_ring); - if (ret != 0) - return ret; - - case SWITCH_OFF_STAGE_2: - intel_overlay_off_tail(overlay); - break; - default: - BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP); - } - - overlay->hw_wedged = 0; overlay->last_flip_req = 0; return 0; } /* Wait for pending overlay flip and release old frame. 
* Needs to be called before the overlay register are changed - * via intel_overlay_(un)map_regs_atomic */ + * via intel_overlay_(un)map_regs + */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { + struct drm_device *dev = overlay->dev; + drm_i915_private_t *dev_priv = dev->dev_private; int ret; - struct drm_gem_object *obj; - /* only wait if there is actually an old frame to release to - * guarantee forward progress */ + /* Only wait if there is actually an old frame to release to + * guarantee forward progress. + */ if (!overlay->old_vid_bo) return 0; - ret = intel_overlay_wait_flip(overlay); - if (ret != 0) - return ret; + if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { + struct drm_i915_gem_request *request; - obj = &overlay->old_vid_bo->base; - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - overlay->old_vid_bo = NULL; + /* synchronous slowpath */ + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + BEGIN_LP_RING(2); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + + ret = intel_overlay_do_wait_request(overlay, request, true, + intel_overlay_release_old_vid_tail); + if (ret) + return ret; + } + + intel_overlay_release_old_vid_tail(overlay); return 0; } @@ -505,65 +502,65 @@ struct put_image_params { static int packed_depth_bytes(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - return 4; - case I915_OVERLAY_YUV411: - /* return 6; not implemented */ - default: - return -EINVAL; + case I915_OVERLAY_YUV422: + return 4; + case I915_OVERLAY_YUV411: + /* return 6; not implemented */ + default: + return -EINVAL; } } static int packed_width_bytes(u32 format, short width) { switch (format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - return width << 1; - default: - return -EINVAL; + case I915_OVERLAY_YUV422: + return width << 1; + default: + return -EINVAL; } } static int uv_hsubsampling(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - case I915_OVERLAY_YUV420: - return 2; - case I915_OVERLAY_YUV411: - case I915_OVERLAY_YUV410: - return 4; - default: - return -EINVAL; + case I915_OVERLAY_YUV422: + case I915_OVERLAY_YUV420: + return 2; + case I915_OVERLAY_YUV411: + case I915_OVERLAY_YUV410: + return 4; + default: + return -EINVAL; } } static int uv_vsubsampling(u32 format) { switch (format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV420: - case I915_OVERLAY_YUV410: - return 2; - case I915_OVERLAY_YUV422: - case I915_OVERLAY_YUV411: - return 1; - default: - return -EINVAL; + case I915_OVERLAY_YUV420: + case I915_OVERLAY_YUV410: + return 2; + case I915_OVERLAY_YUV422: + case I915_OVERLAY_YUV411: + return 1; + default: + return -EINVAL; } } static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) { u32 mask, shift, ret; - if (IS_I9XX(dev)) { - mask = 0x3f; - shift = 6; - } else { + if (IS_GEN2(dev)) { mask = 0x1f; shift = 5; + } else { + mask = 0x3f; + shift = 6; } ret = ((offset + width + mask) >> shift) - (offset >> shift); - if (IS_I9XX(dev)) + if (!IS_GEN2(dev)) ret <<= 1; ret -=1; return ret << 2; @@ -586,7 +583,9 @@ static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, - 0xb000, 0x3000, 0x0800, 0x3000, 0xb000}; + 0xb000, 0x3000, 0x0800, 0x3000, 0xb000 +}; + static const u16 
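calc_swidthsw() computes the SWIDTHSW register field by counting how many 32-byte (gen2) or 64-byte units a scanline spans from its byte offset; the hunk above only inverts the chipset test from IS_I9XX() to IS_GEN2(). The same computation standalone:

#include <stdio.h>

static unsigned int calc_swidthsw(int gen2, unsigned int offset,
                                  unsigned int width)
{
        unsigned int mask = gen2 ? 0x1f : 0x3f;  /* 32B vs 64B units */
        unsigned int shift = gen2 ? 5 : 6;
        unsigned int ret;

        ret = ((offset + width + mask) >> shift) - (offset >> shift);
        if (!gen2)
                ret <<= 1;
        ret -= 1;
        return ret << 2;
}

int main(void)
{
        /* a 1024-byte scanline at byte offset 16 on a gen4 part */
        printf("SWIDTHSW = %u\n", calc_swidthsw(0, 16, 1024));
        return 0;
}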
uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60, 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40, @@ -596,7 +595,8 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = { 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0, 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240, 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0, - 0x3000, 0x0800, 0x3000}; + 0x3000, 0x0800, 0x3000 +}; static void update_polyphase_filter(struct overlay_registers *regs) { @@ -629,29 +629,31 @@ static bool update_scaling_factors(struct intel_overlay *overlay, yscale = 1 << FP_SHIFT; /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/ - xscale_UV = xscale/uv_hscale; - yscale_UV = yscale/uv_vscale; - /* make the Y scale to UV scale ratio an exact multiply */ - xscale = xscale_UV * uv_hscale; - yscale = yscale_UV * uv_vscale; + xscale_UV = xscale/uv_hscale; + yscale_UV = yscale/uv_vscale; + /* make the Y scale to UV scale ratio an exact multiply */ + xscale = xscale_UV * uv_hscale; + yscale = yscale_UV * uv_vscale; /*} else { - xscale_UV = 0; - yscale_UV = 0; - }*/ + xscale_UV = 0; + yscale_UV = 0; + }*/ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale) scale_changed = true; overlay->old_xscale = xscale; overlay->old_yscale = yscale; - regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20) - | ((xscale >> FP_SHIFT) << 16) - | ((xscale & FRACT_MASK) << 3); - regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20) - | ((xscale_UV >> FP_SHIFT) << 16) - | ((xscale_UV & FRACT_MASK) << 3); - regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16) - | ((yscale_UV >> FP_SHIFT) << 0); + regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) | + ((xscale >> FP_SHIFT) << 16) | + ((xscale & FRACT_MASK) << 3)); + + regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) | + ((xscale_UV >> FP_SHIFT) << 16) | + ((xscale_UV & FRACT_MASK) << 3)); + + regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) | + ((yscale_UV >> FP_SHIFT) << 0))); if (scale_changed) update_polyphase_filter(regs); @@ -663,22 +665,28 @@ static void update_colorkey(struct intel_overlay *overlay, struct overlay_registers *regs) { u32 key = overlay->color_key; + switch (overlay->crtc->base.fb->bits_per_pixel) { - case 8: - regs->DCLRKV = 0; - regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; - case 16: - if (overlay->crtc->base.fb->depth == 15) { - regs->DCLRKV = RGB15_TO_COLORKEY(key); - regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; - } else { - regs->DCLRKV = RGB16_TO_COLORKEY(key); - regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; - } - case 24: - case 32: - regs->DCLRKV = key; - regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; + case 8: + regs->DCLRKV = 0; + regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE; + break; + + case 16: + if (overlay->crtc->base.fb->depth == 15) { + regs->DCLRKV = RGB15_TO_COLORKEY(key); + regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE; + } else { + regs->DCLRKV = RGB16_TO_COLORKEY(key); + regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE; + } + break; + + case 24: + case 32: + regs->DCLRKV = key; + regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE; + break; } } @@ -688,48 +696,48 @@ static u32 overlay_cmd_reg(struct put_image_params *params) if (params->format & I915_OVERLAY_YUV_PLANAR) { switch (params->format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - cmd |= OCMD_YUV_422_PLANAR; - break; - case I915_OVERLAY_YUV420: - cmd |= OCMD_YUV_420_PLANAR; - break; - case I915_OVERLAY_YUV411: - case I915_OVERLAY_YUV410: - cmd |= OCMD_YUV_410_PLANAR; - break; + case I915_OVERLAY_YUV422: + cmd |= 
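update_scaling_factors() first divides the Y scale by the subsampling factor and multiplies back, so the Y scale is always an exact multiple of the UV scale and chroma cannot drift against luma; integer and fractional parts are then packed into the *SCALE registers. A small model, assuming the same (1 << 12) == 1.0 fixed-point convention the patch documents for pfit_vscale_ratio:

#include <stdio.h>

#define FP_SHIFT   12     /* assumed: (1 << 12) == 1.0   */
#define FRACT_MASK 0xfff  /* assumed: 12 fractional bits */

int main(void)
{
        unsigned int xscale = (3u << FP_SHIFT) / 2;  /* 1.5 */
        unsigned int uv_hscale = 2;                  /* 4:2:2 horizontal */
        unsigned int xscale_UV = xscale / uv_hscale;

        xscale = xscale_UV * uv_hscale;  /* snap Y to a UV multiple */

        printf("Y: int=%u frac=%#05x  UV: int=%u frac=%#05x\n",
               xscale >> FP_SHIFT, xscale & FRACT_MASK,
               xscale_UV >> FP_SHIFT, xscale_UV & FRACT_MASK);
        return 0;
}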
OCMD_YUV_422_PLANAR; + break; + case I915_OVERLAY_YUV420: + cmd |= OCMD_YUV_420_PLANAR; + break; + case I915_OVERLAY_YUV411: + case I915_OVERLAY_YUV410: + cmd |= OCMD_YUV_410_PLANAR; + break; } } else { /* YUV packed */ switch (params->format & I915_OVERLAY_DEPTH_MASK) { - case I915_OVERLAY_YUV422: - cmd |= OCMD_YUV_422_PACKED; - break; - case I915_OVERLAY_YUV411: - cmd |= OCMD_YUV_411_PACKED; - break; + case I915_OVERLAY_YUV422: + cmd |= OCMD_YUV_422_PACKED; + break; + case I915_OVERLAY_YUV411: + cmd |= OCMD_YUV_411_PACKED; + break; } switch (params->format & I915_OVERLAY_SWAP_MASK) { - case I915_OVERLAY_NO_SWAP: - break; - case I915_OVERLAY_UV_SWAP: - cmd |= OCMD_UV_SWAP; - break; - case I915_OVERLAY_Y_SWAP: - cmd |= OCMD_Y_SWAP; - break; - case I915_OVERLAY_Y_AND_UV_SWAP: - cmd |= OCMD_Y_AND_UV_SWAP; - break; + case I915_OVERLAY_NO_SWAP: + break; + case I915_OVERLAY_UV_SWAP: + cmd |= OCMD_UV_SWAP; + break; + case I915_OVERLAY_Y_SWAP: + cmd |= OCMD_Y_SWAP; + break; + case I915_OVERLAY_Y_AND_UV_SWAP: + cmd |= OCMD_Y_AND_UV_SWAP; + break; } } return cmd; } -int intel_overlay_do_put_image(struct intel_overlay *overlay, - struct drm_gem_object *new_bo, - struct put_image_params *params) +static int intel_overlay_do_put_image(struct intel_overlay *overlay, + struct drm_gem_object *new_bo, + struct put_image_params *params) { int ret, tmp_width; struct overlay_registers *regs; @@ -754,24 +762,24 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; if (!overlay->active) { - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unpin; } regs->OCONFIG = OCONF_CC_OUT_8BIT; - if (IS_I965GM(overlay->dev)) + if (IS_GEN4(overlay->dev)) regs->OCONFIG |= OCONF_CSC_MODE_BT709; regs->OCONFIG |= overlay->crtc->pipe == 0 ? 
OCONF_PIPE_A : OCONF_PIPE_B; - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay, regs); ret = intel_overlay_on(overlay); if (ret != 0) goto out_unpin; } - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unpin; @@ -787,7 +795,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->SWIDTH = params->src_w; regs->SWIDTHSW = calc_swidthsw(overlay->dev, - params->offset_Y, tmp_width); + params->offset_Y, tmp_width); regs->SHEIGHT = params->src_h; regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y; regs->OSTRIDE = params->stride_Y; @@ -798,9 +806,9 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, u32 tmp_U, tmp_V; regs->SWIDTH |= (params->src_w/uv_hscale) << 16; tmp_U = calc_swidthsw(overlay->dev, params->offset_U, - params->src_w/uv_hscale); + params->src_w/uv_hscale); tmp_V = calc_swidthsw(overlay->dev, params->offset_V, - params->src_w/uv_hscale); + params->src_w/uv_hscale); regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; @@ -814,9 +822,11 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->OCMD = overlay_cmd_reg(params); - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay, regs); - intel_overlay_continue(overlay, scale_changed); + ret = intel_overlay_continue(overlay, scale_changed); + if (ret) + goto out_unpin; overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = to_intel_bo(new_bo); @@ -828,20 +838,19 @@ out_unpin: return ret; } -int intel_overlay_switch_off(struct intel_overlay *overlay) +int intel_overlay_switch_off(struct intel_overlay *overlay, + bool interruptible) { - int ret; struct overlay_registers *regs; struct drm_device *dev = overlay->dev; + int ret; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); - if (overlay->hw_wedged) { - ret = intel_overlay_recover_from_interrupt(overlay, 1); - if (ret != 0) - return ret; - } + ret = intel_overlay_recover_from_interrupt(overlay, interruptible); + if (ret != 0) + return ret; if (!overlay->active) return 0; @@ -850,33 +859,29 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) if (ret != 0) return ret; - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); regs->OCMD = 0; - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay, regs); - ret = intel_overlay_off(overlay); + ret = intel_overlay_off(overlay, interruptible); if (ret != 0) return ret; intel_overlay_off_tail(overlay); - return 0; } static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, struct intel_crtc *crtc) { - drm_i915_private_t *dev_priv = overlay->dev->dev_private; - u32 pipeconf; - int pipeconf_reg = (crtc->pipe == 0) ? 
PIPEACONF : PIPEBCONF; + drm_i915_private_t *dev_priv = overlay->dev->dev_private; - if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON) + if (!crtc->active) return -EINVAL; - pipeconf = I915_READ(pipeconf_reg); - /* can't use the overlay with double wide pipe */ - if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE) + if (INTEL_INFO(overlay->dev)->gen < 4 && + (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE) return -EINVAL; return 0; @@ -885,20 +890,22 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - u32 ratio; + drm_i915_private_t *dev_priv = dev->dev_private; u32 pfit_control = I915_READ(PFIT_CONTROL); + u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in - * line with the intel documentation for the i965 */ - if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) { - ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT; - } else { /* on i965 use the PGM reg to read out the autoscaler values */ - ratio = I915_READ(PFIT_PGM_RATIOS); - if (IS_I965G(dev)) - ratio >>= PFIT_VERT_SCALE_SHIFT_965; + * line with the intel documentation for the i965 + */ + if (INTEL_INFO(dev)->gen >= 4) { + /* on i965 use the PGM reg to read out the autoscaler values */ + ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; + } else { + if (pfit_control & VERT_AUTO_SCALE) + ratio = I915_READ(PFIT_AUTO_RATIOS); else - ratio >>= PFIT_VERT_SCALE_SHIFT; + ratio = I915_READ(PFIT_PGM_RATIOS); + ratio >>= PFIT_VERT_SCALE_SHIFT; } overlay->pfit_vscale_ratio = ratio; @@ -909,12 +916,10 @@ static int check_overlay_dst(struct intel_overlay *overlay, { struct drm_display_mode *mode = &overlay->crtc->base.mode; - if ((rec->dst_x < mode->crtc_hdisplay) - && (rec->dst_x + rec->dst_width - <= mode->crtc_hdisplay) - && (rec->dst_y < mode->crtc_vdisplay) - && (rec->dst_y + rec->dst_height - <= mode->crtc_vdisplay)) + if (rec->dst_x < mode->crtc_hdisplay && + rec->dst_x + rec->dst_width <= mode->crtc_hdisplay && + rec->dst_y < mode->crtc_vdisplay && + rec->dst_y + rec->dst_height <= mode->crtc_vdisplay) return 0; else return -EINVAL; @@ -939,53 +944,57 @@ static int check_overlay_src(struct drm_device *dev, struct drm_intel_overlay_put_image *rec, struct drm_gem_object *new_bo) { - u32 stride_mask; - int depth; int uv_hscale = uv_hsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags); - size_t tmp; + u32 stride_mask, depth, tmp; /* check src dimensions */ if (IS_845G(dev) || IS_I830(dev)) { - if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY - || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) + if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || + rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; } else { - if (rec->src_height > IMAGE_MAX_HEIGHT - || rec->src_width > IMAGE_MAX_WIDTH) + if (rec->src_height > IMAGE_MAX_HEIGHT || + rec->src_width > IMAGE_MAX_WIDTH) return -EINVAL; } + /* better safe than sorry, use 4 as the maximal subsampling ratio */ - if (rec->src_height < N_VERT_Y_TAPS*4 - || rec->src_width < N_HORIZ_Y_TAPS*4) + if (rec->src_height < N_VERT_Y_TAPS*4 || + rec->src_width < N_HORIZ_Y_TAPS*4) return -EINVAL; /* check alignment constraints */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { - case I915_OVERLAY_RGB: - /* not implemented */ + case I915_OVERLAY_RGB: + /* not implemented */ + return -EINVAL; + 
+ case I915_OVERLAY_YUV_PACKED: + if (uv_vscale != 1) return -EINVAL; - case I915_OVERLAY_YUV_PACKED: - depth = packed_depth_bytes(rec->flags); - if (uv_vscale != 1) - return -EINVAL; - if (depth < 0) - return depth; - /* ignore UV planes */ - rec->stride_UV = 0; - rec->offset_U = 0; - rec->offset_V = 0; - /* check pixel alignment */ - if (rec->offset_Y % depth) - return -EINVAL; - break; - case I915_OVERLAY_YUV_PLANAR: - if (uv_vscale < 0 || uv_hscale < 0) - return -EINVAL; - /* no offset restrictions for planar formats */ - break; - default: + + depth = packed_depth_bytes(rec->flags); + if (depth < 0) + return depth; + + /* ignore UV planes */ + rec->stride_UV = 0; + rec->offset_U = 0; + rec->offset_V = 0; + /* check pixel alignment */ + if (rec->offset_Y % depth) return -EINVAL; + break; + + case I915_OVERLAY_YUV_PLANAR: + if (uv_vscale < 0 || uv_hscale < 0) + return -EINVAL; + /* no offset restrictions for planar formats */ + break; + + default: + return -EINVAL; } if (rec->src_width % uv_hscale) @@ -999,47 +1008,74 @@ static int check_overlay_src(struct drm_device *dev, if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; - if (IS_I965G(dev) && rec->stride_Y < 512) + if (IS_GEN4(dev) && rec->stride_Y < 512) return -EINVAL; tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? - 4 : 8; - if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024) + 4096 : 8192; + if (rec->stride_Y > tmp || rec->stride_UV > 2*1024) return -EINVAL; /* check buffer dimensions */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { - case I915_OVERLAY_RGB: - case I915_OVERLAY_YUV_PACKED: - /* always 4 Y values per depth pixels */ - if (packed_width_bytes(rec->flags, rec->src_width) - > rec->stride_Y) - return -EINVAL; + case I915_OVERLAY_RGB: + case I915_OVERLAY_YUV_PACKED: + /* always 4 Y values per depth pixels */ + if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y) + return -EINVAL; - tmp = rec->stride_Y*rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) - return -EINVAL; - break; - case I915_OVERLAY_YUV_PLANAR: - if (rec->src_width > rec->stride_Y) - return -EINVAL; - if (rec->src_width/uv_hscale > rec->stride_UV) - return -EINVAL; + tmp = rec->stride_Y*rec->src_height; + if (rec->offset_Y + tmp > new_bo->size) + return -EINVAL; + break; - tmp = rec->stride_Y*rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) - return -EINVAL; - tmp = rec->stride_UV*rec->src_height; - tmp /= uv_vscale; - if (rec->offset_U + tmp > new_bo->size - || rec->offset_V + tmp > new_bo->size) - return -EINVAL; - break; + case I915_OVERLAY_YUV_PLANAR: + if (rec->src_width > rec->stride_Y) + return -EINVAL; + if (rec->src_width/uv_hscale > rec->stride_UV) + return -EINVAL; + + tmp = rec->stride_Y * rec->src_height; + if (rec->offset_Y + tmp > new_bo->size) + return -EINVAL; + + tmp = rec->stride_UV * (rec->src_height / uv_vscale); + if (rec->offset_U + tmp > new_bo->size || + rec->offset_V + tmp > new_bo->size) + return -EINVAL; + break; } return 0; } +/** + * Return the pipe currently connected to the panel fitter, + * or -1 if the panel fitter is not present or not in use + */ +static int intel_panel_fitter_pipe(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pfit_control; + + /* i830 doesn't have a panel fitter */ + if (IS_I830(dev)) + return -1; + + pfit_control = I915_READ(PFIT_CONTROL); + + /* See if the panel fitter is in use */ + if ((pfit_control & PFIT_ENABLE) == 0) + return -1; + + /* 965 can place panel 
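For planar YUV, check_overlay_src() requires each plane to fit inside the buffer object: the Y plane needs stride_Y * src_height bytes past offset_Y, and each chroma plane stride_UV * (src_height / uv_vscale) bytes past its own offset. The same test standalone, with hypothetical 640x480 YUV420 values:

#include <stdio.h>

static int planar_fits(unsigned int bo_size, unsigned int src_h,
                       unsigned int uv_vscale,
                       unsigned int stride_y, unsigned int off_y,
                       unsigned int stride_uv,
                       unsigned int off_u, unsigned int off_v)
{
        unsigned int y_bytes = stride_y * src_h;
        unsigned int uv_bytes = stride_uv * (src_h / uv_vscale);

        return off_y + y_bytes <= bo_size &&
               off_u + uv_bytes <= bo_size &&
               off_v + uv_bytes <= bo_size;
}

int main(void)
{
        /* 640x480 YUV420: uv_vscale == 2, half-width chroma strides */
        unsigned int y = 640 * 480, c = 320 * 240;

        printf("%d\n", planar_fits(y + 2 * c, 480, 2,
                                   640, 0, 320, y, y + c));  /* 1 */
        return 0;
}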
fitter on either pipe */ + if (IS_GEN4(dev)) + return (pfit_control >> 29) & 0x3; + + /* older chips can only use pipe 1 */ + return 1; +} + int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1067,7 +1103,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); - ret = intel_overlay_switch_off(overlay); + ret = intel_overlay_switch_off(overlay, true); mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); @@ -1080,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, return -ENOMEM; drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, - DRM_MODE_OBJECT_CRTC); + DRM_MODE_OBJECT_CRTC); if (!drmmode_obj) { ret = -ENOENT; goto out_free; @@ -1088,7 +1124,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); new_bo = drm_gem_object_lookup(dev, file_priv, - put_image_rec->bo_handle); + put_image_rec->bo_handle); if (!new_bo) { ret = -ENOENT; goto out_free; @@ -1097,15 +1133,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); - if (overlay->hw_wedged) { - ret = intel_overlay_recover_from_interrupt(overlay, 1); - if (ret != 0) - goto out_unlock; - } + ret = intel_overlay_recover_from_interrupt(overlay, true); + if (ret != 0) + goto out_unlock; if (overlay->crtc != crtc) { struct drm_display_mode *mode = &crtc->base.mode; - ret = intel_overlay_switch_off(overlay); + ret = intel_overlay_switch_off(overlay, true); if (ret != 0) goto out_unlock; @@ -1116,9 +1150,9 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, overlay->crtc = crtc; crtc->overlay = overlay; - if (intel_panel_fitter_pipe(dev) == crtc->pipe - /* and line to wide, i.e. one-line-mode */ - && mode->hdisplay > 1024) { + /* line too wide, i.e. 
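intel_panel_fitter_pipe() folds three cases together: i830 has no fitter at all, gen4 parts carry a two-bit pipe-select field in PFIT_CONTROL, and everything older is hardwired to pipe 1. A standalone decode; the enable-bit position is an assumption here, only the shift-by-29 select comes from the patch:

#include <stdio.h>

#define PFIT_ENABLE (1u << 31)  /* assumed bit position */

static int panel_fitter_pipe(unsigned int pfit_control,
                             int gen4, int is_i830)
{
        if (is_i830)
                return -1;                          /* no fitter        */
        if ((pfit_control & PFIT_ENABLE) == 0)
                return -1;                          /* fitter unused    */
        if (gen4)
                return (pfit_control >> 29) & 0x3;  /* pipe select bits */
        return 1;                                   /* pre-965: pipe B  */
}

int main(void)
{
        printf("%d\n", panel_fitter_pipe(PFIT_ENABLE | (1u << 29), 1, 0));
        return 0;
}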
one-line-mode */ + if (mode->hdisplay > 1024 && + intel_panel_fitter_pipe(dev) == crtc->pipe) { overlay->pfit_active = 1; update_pfit_vscale_ratio(overlay); } else @@ -1131,10 +1165,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, if (overlay->pfit_active) { params->dst_y = ((((u32)put_image_rec->dst_y) << 12) / - overlay->pfit_vscale_ratio); + overlay->pfit_vscale_ratio); /* shifting right rounds downwards, so add 1 */ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) / - overlay->pfit_vscale_ratio) + 1; + overlay->pfit_vscale_ratio) + 1; } else { params->dst_y = put_image_rec->dst_y; params->dst_h = put_image_rec->dst_height; @@ -1146,8 +1180,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, params->src_h = put_image_rec->src_height; params->src_scan_w = put_image_rec->src_scan_width; params->src_scan_h = put_image_rec->src_scan_height; - if (params->src_scan_h > params->src_h - || params->src_scan_w > params->src_w) { + if (params->src_scan_h > params->src_h || + params->src_scan_w > params->src_w) { ret = -EINVAL; goto out_unlock; } @@ -1203,7 +1237,7 @@ static bool check_gamma_bounds(u32 gamma1, u32 gamma2) return false; for (i = 0; i < 3; i++) { - if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) + if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff)) return false; } @@ -1224,16 +1258,18 @@ static bool check_gamma5_errata(u32 gamma5) static int check_gamma(struct drm_intel_overlay_attrs *attrs) { - if (!check_gamma_bounds(0, attrs->gamma0) - || !check_gamma_bounds(attrs->gamma0, attrs->gamma1) - || !check_gamma_bounds(attrs->gamma1, attrs->gamma2) - || !check_gamma_bounds(attrs->gamma2, attrs->gamma3) - || !check_gamma_bounds(attrs->gamma3, attrs->gamma4) - || !check_gamma_bounds(attrs->gamma4, attrs->gamma5) - || !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) + if (!check_gamma_bounds(0, attrs->gamma0) || + !check_gamma_bounds(attrs->gamma0, attrs->gamma1) || + !check_gamma_bounds(attrs->gamma1, attrs->gamma2) || + !check_gamma_bounds(attrs->gamma2, attrs->gamma3) || + !check_gamma_bounds(attrs->gamma3, attrs->gamma4) || + !check_gamma_bounds(attrs->gamma4, attrs->gamma5) || + !check_gamma_bounds(attrs->gamma5, 0x00ffffff)) return -EINVAL; + if (!check_gamma5_errata(attrs->gamma5)) return -EINVAL; + return 0; } @@ -1260,13 +1296,14 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); + ret = -EINVAL; if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { - attrs->color_key = overlay->color_key; + attrs->color_key = overlay->color_key; attrs->brightness = overlay->brightness; - attrs->contrast = overlay->contrast; + attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; - if (IS_I9XX(dev)) { + if (!IS_GEN2(dev)) { attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma2 = I915_READ(OGAMC2); @@ -1274,29 +1311,20 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, attrs->gamma4 = I915_READ(OGAMC4); attrs->gamma5 = I915_READ(OGAMC5); } - ret = 0; } else { - overlay->color_key = attrs->color_key; - if (attrs->brightness >= -128 && attrs->brightness <= 127) { - overlay->brightness = attrs->brightness; - } else { - ret = -EINVAL; + if (attrs->brightness < -128 || attrs->brightness > 127) goto out_unlock; - } - if (attrs->contrast <= 255) { - overlay->contrast = attrs->contrast; - } else { - ret = -EINVAL; + if (attrs->contrast > 255) goto out_unlock; - } - if (attrs->saturation <= 1023) { - 
overlay->saturation = attrs->saturation; - } else { - ret = -EINVAL; + if (attrs->saturation > 1023) goto out_unlock; - } - regs = intel_overlay_map_regs_atomic(overlay); + overlay->color_key = attrs->color_key; + overlay->brightness = attrs->brightness; + overlay->contrast = attrs->contrast; + overlay->saturation = attrs->saturation; + + regs = intel_overlay_map_regs(overlay); if (!regs) { ret = -ENOMEM; goto out_unlock; @@ -1304,13 +1332,11 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, update_reg_attrs(overlay, regs); - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay, regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { - if (!IS_I9XX(dev)) { - ret = -EINVAL; + if (IS_GEN2(dev)) goto out_unlock; - } if (overlay->active) { ret = -EBUSY; @@ -1318,7 +1344,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, } ret = check_gamma(attrs); - if (ret != 0) + if (ret) goto out_unlock; I915_WRITE(OGAMC0, attrs->gamma0); @@ -1328,9 +1354,9 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, I915_WRITE(OGAMC4, attrs->gamma4); I915_WRITE(OGAMC5, attrs->gamma5); } - ret = 0; } + ret = 0; out_unlock: mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); @@ -1346,7 +1372,7 @@ void intel_setup_overlay(struct drm_device *dev) struct overlay_registers *regs; int ret; - if (!OVERLAY_EXISTS(dev)) + if (!HAS_OVERLAY(dev)) return; overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); @@ -1359,22 +1385,28 @@ void intel_setup_overlay(struct drm_device *dev) goto out_free; overlay->reg_bo = to_intel_bo(reg_bo); - if (OVERLAY_NONPHYSICAL(dev)) { + if (OVERLAY_NEEDS_PHYSICAL(dev)) { + ret = i915_gem_attach_phys_object(dev, reg_bo, + I915_GEM_PHYS_OVERLAY_REGS, + PAGE_SIZE); + if (ret) { + DRM_ERROR("failed to attach phys overlay regs\n"); + goto out_free_bo; + } + overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; + } else { ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; } overlay->flip_addr = overlay->reg_bo->gtt_offset; - } else { - ret = i915_gem_attach_phys_object(dev, reg_bo, - I915_GEM_PHYS_OVERLAY_REGS, - 0); - if (ret) { - DRM_ERROR("failed to attach phys overlay regs\n"); - goto out_free_bo; + + ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); + if (ret) { + DRM_ERROR("failed to move overlay register bo into the GTT\n"); + goto out_unpin_bo; } - overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; } /* init all values */ @@ -1383,21 +1415,22 @@ void intel_setup_overlay(struct drm_device *dev) overlay->contrast = 75; overlay->saturation = 146; - regs = intel_overlay_map_regs_atomic(overlay); + regs = intel_overlay_map_regs(overlay); if (!regs) goto out_free_bo; memset(regs, 0, sizeof(struct overlay_registers)); update_polyphase_filter(regs); - update_reg_attrs(overlay, regs); - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; DRM_INFO("initialized overlay support\n"); return; +out_unpin_bo: + i915_gem_object_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(reg_bo); out_free: @@ -1407,18 +1440,23 @@ out_free: void intel_cleanup_overlay(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; - if (dev_priv->overlay) { - /* The bo's should be free'd by the generic code already. 
- * Furthermore modesetting teardown happens beforehand so the - * hardware should be off already */ - BUG_ON(dev_priv->overlay->active); + if (!dev_priv->overlay) + return; - kfree(dev_priv->overlay); - } + /* The bo's should be free'd by the generic code already. + * Furthermore modesetting teardown happens beforehand so the + * hardware should be off already */ + BUG_ON(dev_priv->overlay->active); + + drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base); + kfree(dev_priv->overlay); } +#ifdef CONFIG_DEBUG_FS +#include <linux/seq_file.h> + struct intel_overlay_error_state { struct overlay_registers regs; unsigned long base; @@ -1426,6 +1464,29 @@ struct intel_overlay_error_state { u32 isr; }; +static struct overlay_registers * +intel_overlay_map_regs_atomic(struct intel_overlay *overlay) +{ + drm_i915_private_t *dev_priv = overlay->dev->dev_private; + struct overlay_registers *regs; + + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + regs = overlay->reg_bo->phys_obj->handle->vaddr; + else + regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + overlay->reg_bo->gtt_offset); + + return regs; +} + +static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, + struct overlay_registers *regs) +{ + if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + io_mapping_unmap_atomic(regs); +} + + struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_device *dev) { @@ -1443,17 +1504,17 @@ intel_overlay_capture_error_state(struct drm_device *dev) error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); - if (OVERLAY_NONPHYSICAL(overlay->dev)) - error->base = (long) overlay->reg_bo->gtt_offset; - else + if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr; + else + error->base = (long) overlay->reg_bo->gtt_offset; regs = intel_overlay_map_regs_atomic(overlay); if (!regs) goto err; memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers)); - intel_overlay_unmap_regs_atomic(overlay); + intel_overlay_unmap_regs_atomic(overlay, regs); return error; @@ -1514,3 +1575,4 @@ intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_s P(UVSCALEV); #undef P } +#endif diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index e7f5299d9d57..92ff8f385278 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -30,6 +30,8 @@ #include "intel_drv.h" +#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ + void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode) @@ -109,3 +111,110 @@ done: dev_priv->pch_pf_pos = (x << 16) | y; dev_priv->pch_pf_size = (width << 16) | height; } + +static int is_backlight_combination_mode(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen >= 4) + return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; + + if (IS_GEN2(dev)) + return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; + + return 0; +} + +u32 intel_panel_get_max_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 max; + + if (HAS_PCH_SPLIT(dev)) { + max = I915_READ(BLC_PWM_PCH_CTL2) >> 16; + } else { + max = I915_READ(BLC_PWM_CTL); + if (IS_PINEVIEW(dev)) { + max >>= 17; + } else { + max >>= 16; + if (INTEL_INFO(dev)->gen < 4) + max &= ~1; + } + + if (is_backlight_combination_mode(dev)) + max *= 0xff; + } + + if (max == 0) { + /* XXX add code here to query mode clock or hardware clock + * and 
program max PWM appropriately. + */ + DRM_ERROR("fixme: max PWM is zero.\n"); + max = 1; + } + + DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); + return max; +} + +u32 intel_panel_get_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val; + + if (HAS_PCH_SPLIT(dev)) { + val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + } else { + val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + if (IS_PINEVIEW(dev)) + val >>= 1; + + if (is_backlight_combination_mode(dev)) { + u8 lbpc; + + val &= ~1; + pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); + val *= lbpc; + val >>= 1; + } + } + + DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); + return val; +} + +static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(BLC_PWM_CPU_CTL, val | level); +} + +void intel_panel_set_backlight(struct drm_device *dev, u32 level) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp; + + DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); + + if (HAS_PCH_SPLIT(dev)) + return intel_pch_panel_set_backlight(dev, level); + + if (is_backlight_combination_mode(dev)) { + u32 max = intel_panel_get_max_backlight(dev); + u8 lbpc; + + lbpc = level * 0xfe / max + 1; + level /= lbpc; + pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); + } + + tmp = I915_READ(BLC_PWM_CTL); + if (IS_PINEVIEW(dev)) { + tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); + level <<= 1; + } else + tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; + I915_WRITE(BLC_PWM_CTL, tmp | level); +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cb3508f78bc3..09f2dc353ae2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -32,6 +32,7 @@ #include "i915_drv.h" #include "i915_drm.h" #include "i915_trace.h" +#include "intel_drv.h" static u32 i915_gem_get_seqno(struct drm_device *dev) { @@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev) static void render_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; u32 cmd; @@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev, if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) cmd &= ~MI_NO_WRITE_FLUSH; - if (!IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen < 4) { /* * On the 965, the sampler cache always gets flushed * and this bit is reserved. 
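An aside on the combination-mode backlight arithmetic in the intel_panel.c hunk above: intel_panel_set_backlight() splits the requested level between the LBPC byte in PCI config space and the PWM duty cycle in BLC_PWM_CTL. The following stand-alone sketch (not driver code) walks through that split; NATIVE_MAX is a made-up modulation-frequency value standing in for what intel_panel_get_max_backlight() reads from the hardware.

	#include <stdio.h>

	/* Hypothetical native PWM maximum; the driver derives the real one
	 * from BLC_PWM_CTL in intel_panel_get_max_backlight(). */
	#define NATIVE_MAX 300u

	int main(void)
	{
		/* in combination mode the reported maximum is scaled by 0xff */
		unsigned int max = NATIVE_MAX * 0xff;
		unsigned int level = 30000; /* requested brightness, 0..max */

		/* the same formulas used by intel_panel_set_backlight() */
		unsigned char lbpc = level * 0xfe / max + 1;
		unsigned int duty = level / lbpc;

		/* duty * lbpc approximates the requested level */
		printf("level=%u -> lbpc=%u duty=%u (product=%u)\n",
		       level, lbpc, duty, duty * lbpc);
		return 0;
	}

With NATIVE_MAX at 300, max is 76500; a request of 30000 yields lbpc = 100 and duty = 300, and their product recovers the request exactly. The +1 keeps lbpc non-zero, so the division that follows can never fault.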
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev, } } -static unsigned int render_ring_get_head(struct drm_device *dev, - struct intel_ring_buffer *ring) +static void ring_write_tail(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value) { drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(PRB0_HEAD) & HEAD_ADDR; + I915_WRITE_TAIL(ring, value); } -static unsigned int render_ring_get_tail(struct drm_device *dev, - struct intel_ring_buffer *ring) +u32 intel_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(PRB0_TAIL) & TAIL_ADDR; -} - -static unsigned int render_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; + u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? + RING_ACTHD(ring->mmio_base) : ACTHD; return I915_READ(acthd_reg); } -static void render_ring_advance_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(PRB0_TAIL, ring->tail); -} - static int init_ring_common(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { u32 head; drm_i915_private_t *dev_priv = dev->dev_private; @@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev, obj_priv = to_intel_bo(ring->gem_object); /* Stop the ring if it's running. */ - I915_WRITE(ring->regs.ctl, 0); - I915_WRITE(ring->regs.head, 0); - I915_WRITE(ring->regs.tail, 0); + I915_WRITE_CTL(ring, 0); + I915_WRITE_HEAD(ring, 0); + ring->write_tail(dev, ring, 0); /* Initialize the ring. */ - I915_WRITE(ring->regs.start, obj_priv->gtt_offset); - head = ring->get_head(dev, ring); + I915_WRITE_START(ring, obj_priv->gtt_offset); + head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ if (head != 0) { DRM_ERROR("%s head not reset to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, - I915_READ(ring->regs.ctl), - I915_READ(ring->regs.head), - I915_READ(ring->regs.tail), - I915_READ(ring->regs.start)); + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); - I915_WRITE(ring->regs.head, 0); + I915_WRITE_HEAD(ring, 0); DRM_ERROR("%s head forced to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, - I915_READ(ring->regs.ctl), - I915_READ(ring->regs.head), - I915_READ(ring->regs.tail), - I915_READ(ring->regs.start)); + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); } - I915_WRITE(ring->regs.ctl, + I915_WRITE_CTL(ring, ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | RING_NO_REPORT | RING_VALID); - head = I915_READ(ring->regs.head) & HEAD_ADDR; + head = I915_READ_HEAD(ring) & HEAD_ADDR; /* If the head is still not zero, the ring is dead */ if (head != 0) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", ring->name, - I915_READ(ring->regs.ctl), - I915_READ(ring->regs.head), - I915_READ(ring->regs.tail), - I915_READ(ring->regs.start)); + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); return -EIO; } if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_kernel_lost_context(dev); else { - ring->head = ring->get_head(dev, ring); - ring->tail = ring->get_tail(dev, ring); + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; @@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev, } static int init_render_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; int ret = init_ring_common(dev, ring); int mode; - if (IS_I9XX(dev) && !IS_GEN3(dev)) { + if (INTEL_INFO(dev)->gen > 3) { mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; if (IS_GEN6(dev)) mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; @@ -250,9 +239,8 @@ do { \ */ static u32 render_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_file *file_priv, - u32 flush_domains) + struct intel_ring_buffer *ring, + u32 flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; u32 seqno; @@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev, } static u32 -render_ring_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +render_ring_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; if (HAS_PIPE_CONTROL(dev)) @@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev, static void render_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; @@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev, static void render_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; @@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev, spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } -static void render_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) +void intel_ring_setup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; if (IS_GEN6(dev)) { - I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr); - I915_READ(HWS_PGA_GEN6); /* posting read */ + I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), + ring->status_page.gfx_addr); + I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */ } else { - I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); - I915_READ(HWS_PGA); /* posting read */ + I915_WRITE(RING_HWS_PGA(ring->mmio_base), + ring->status_page.gfx_addr); + I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */ } } -void +static void bsd_ring_flush(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, @@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev, intel_ring_advance(dev, ring); } -static inline unsigned int bsd_ring_get_head(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(BSD_RING_HEAD) & HEAD_ADDR; -} - -static inline unsigned int bsd_ring_get_tail(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return I915_READ(BSD_RING_TAIL) & TAIL_ADDR; -} - -static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return 
I915_READ(BSD_RING_ACTHD); -} - -static inline void bsd_ring_advance_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(BSD_RING_TAIL, ring->tail); -} - static int init_bsd_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { return init_ring_common(dev, ring); } static u32 -bsd_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_file *file_priv, - u32 flush_domains) +ring_add_request(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 flush_domains) { u32 seqno; @@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev, return seqno; } -static void bsd_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr); - I915_READ(BSD_HWS_PGA); -} - static void bsd_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { /* do nothing */ } static void bsd_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { /* do nothing */ } static u32 -bsd_ring_get_gem_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +ring_status_page_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static int -bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; @@ -488,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, return 0; } - static int render_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { drm_i915_private_t *dev_priv = dev->dev_private; int nbox = exec->num_cliprects; @@ -523,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, intel_ring_emit(dev, ring, exec_start + exec_len - 4); intel_ring_emit(dev, ring, 0); } else { - intel_ring_begin(dev, ring, 4); - if (IS_I965G(dev)) { + intel_ring_begin(dev, ring, 2); + if (INTEL_INFO(dev)->gen >= 4) { intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); @@ -539,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, intel_ring_advance(dev, ring); } - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev)) { intel_ring_begin(dev, ring, 2); intel_ring_emit(dev, ring, MI_FLUSH | MI_NO_WRITE_FLUSH | @@ -553,7 +505,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, } static void cleanup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; @@ -573,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev, } static int init_status_page(struct 
drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; @@ -603,7 +555,7 @@ static int init_status_page(struct drm_device *dev, ring->status_page.obj = obj; memset(ring->status_page.page_addr, 0, PAGE_SIZE); - ring->setup_status_page(dev, ring); + intel_ring_setup_status_page(dev, ring); DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr); @@ -617,15 +569,18 @@ err: return ret; } - int intel_init_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; struct drm_gem_object *obj; int ret; ring->dev = dev; + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); if (I915_NEED_GFX_HWS(dev)) { ret = init_status_page(dev, ring); @@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev, ring->gem_object = obj; - ret = i915_gem_object_pin(obj, ring->alignment); + ret = i915_gem_object_pin(obj, PAGE_SIZE); if (ret) goto err_unref; @@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_kernel_lost_context(dev); else { - ring->head = ring->get_head(dev, ring); - ring->tail = ring->get_tail(dev, ring); + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; + ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; } - INIT_LIST_HEAD(&ring->active_list); - INIT_LIST_HEAD(&ring->request_list); return ret; err_unmap: @@ -691,7 +644,7 @@ err_hws: } void intel_cleanup_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { if (ring->gem_object == NULL) return; @@ -704,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev, cleanup_status_page(dev, ring); } -int intel_wrap_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int intel_wrap_ring_buffer(struct drm_device *dev, + struct intel_ring_buffer *ring) { unsigned int *virt; int rem; @@ -731,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev, } int intel_wait_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring, int n) + struct intel_ring_buffer *ring, int n) { unsigned long end; + drm_i915_private_t *dev_priv = dev->dev_private; trace_i915_ring_wait_begin (dev); end = jiffies + 3 * HZ; do { - ring->head = ring->get_head(dev, ring); + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; @@ -753,14 +707,15 @@ int intel_wait_ring_buffer(struct drm_device *dev, master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; } - yield(); + msleep(1); } while (!time_after(jiffies, end)); trace_i915_ring_wait_end (dev); return -EBUSY; } void intel_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, int num_dwords) + struct intel_ring_buffer *ring, + int num_dwords) { int n = 4*num_dwords; if (unlikely(ring->tail + n > ring->size)) @@ -772,97 +727,181 @@ void intel_ring_begin(struct drm_device *dev, } void intel_ring_advance(struct drm_device *dev, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring) { ring->tail &= ring->size - 1; - ring->advance_ring(dev, ring); + ring->write_tail(dev, ring, ring->tail); } -void intel_fill_struct(struct 
drm_device *dev, - struct intel_ring_buffer *ring, - void *data, - unsigned int len) -{ - unsigned int *virt = ring->virtual_start + ring->tail; - BUG_ON((len&~(4-1)) != 0); - intel_ring_begin(dev, ring, len/4); - memcpy(virt, data, len); - ring->tail += len; - ring->tail &= ring->size - 1; - ring->space -= len; - intel_ring_advance(dev, ring); -} - -struct intel_ring_buffer render_ring = { +static const struct intel_ring_buffer render_ring = { .name = "render ring", - .regs = { - .ctl = PRB0_CTL, - .head = PRB0_HEAD, - .tail = PRB0_TAIL, - .start = PRB0_START - }, - .ring_flag = I915_EXEC_RENDER, + .id = RING_RENDER, + .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, - .alignment = PAGE_SIZE, - .virtual_start = NULL, - .dev = NULL, - .gem_object = NULL, - .head = 0, - .tail = 0, - .space = 0, - .user_irq_refcount = 0, - .irq_gem_seqno = 0, - .waiting_gem_seqno = 0, - .setup_status_page = render_setup_status_page, .init = init_render_ring, - .get_head = render_ring_get_head, - .get_tail = render_ring_get_tail, - .get_active_head = render_ring_get_active_head, - .advance_ring = render_ring_advance_ring, + .write_tail = ring_write_tail, .flush = render_ring_flush, .add_request = render_ring_add_request, - .get_gem_seqno = render_ring_get_gem_seqno, + .get_seqno = render_ring_get_seqno, .user_irq_get = render_ring_get_user_irq, .user_irq_put = render_ring_put_user_irq, .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, - .status_page = {NULL, 0, NULL}, - .map = {0,} }; /* ring buffer for bit-stream decoder */ -struct intel_ring_buffer bsd_ring = { +static const struct intel_ring_buffer bsd_ring = { .name = "bsd ring", - .regs = { - .ctl = BSD_RING_CTL, - .head = BSD_RING_HEAD, - .tail = BSD_RING_TAIL, - .start = BSD_RING_START - }, - .ring_flag = I915_EXEC_BSD, + .id = RING_BSD, + .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, - .alignment = PAGE_SIZE, - .virtual_start = NULL, - .dev = NULL, - .gem_object = NULL, - .head = 0, - .tail = 0, - .space = 0, - .user_irq_refcount = 0, - .irq_gem_seqno = 0, - .waiting_gem_seqno = 0, - .setup_status_page = bsd_setup_status_page, .init = init_bsd_ring, - .get_head = bsd_ring_get_head, - .get_tail = bsd_ring_get_tail, - .get_active_head = bsd_ring_get_active_head, - .advance_ring = bsd_ring_advance_ring, + .write_tail = ring_write_tail, .flush = bsd_ring_flush, - .add_request = bsd_ring_add_request, - .get_gem_seqno = bsd_ring_get_gem_seqno, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, - .status_page = {NULL, 0, NULL}, - .map = {0,} + .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, }; + + +static void gen6_bsd_ring_write_tail(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + /* Every tail move must follow the sequence below */ + I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, + GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | + GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE); + I915_WRITE(GEN6_BSD_RNCID, 0x0); + + if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & + GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, + 50)) + DRM_ERROR("timed out waiting for IDLE Indicator\n"); + + I915_WRITE_TAIL(ring, value); + I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, + GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK | + GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); 
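+ + /* re-enable the RC sleep-state messages that were masked off at the + * top of this function, now that the new tail pointer has been written */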
+} + +static void gen6_ring_flush(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + intel_ring_begin(dev, ring, 4); + intel_ring_emit(dev, ring, MI_FLUSH_DW); + intel_ring_emit(dev, ring, 0); + intel_ring_emit(dev, ring, 0); + intel_ring_emit(dev, ring, 0); + intel_ring_advance(dev, ring); +} + +static int +gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) +{ + uint32_t exec_start; + + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, + MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); + /* bit0-7 is the length on GEN6+ */ + intel_ring_emit(dev, ring, exec_start); + intel_ring_advance(dev, ring); + + return 0; +} + +/* ring buffer for Video Codec for Gen6+ */ +static const struct intel_ring_buffer gen6_bsd_ring = { + .name = "gen6 bsd ring", + .id = RING_BSD, + .mmio_base = GEN6_BSD_RING_BASE, + .size = 32 * PAGE_SIZE, + .init = init_bsd_ring, + .write_tail = gen6_bsd_ring_write_tail, + .flush = gen6_ring_flush, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, + .user_irq_get = bsd_ring_get_user_irq, + .user_irq_put = bsd_ring_put_user_irq, + .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, +}; + +/* Blitter support (SandyBridge+) */ + +static void +blt_ring_get_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + /* do nothing */ +} +static void +blt_ring_put_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + /* do nothing */ +} + +static const struct intel_ring_buffer gen6_blt_ring = { + .name = "blt ring", + .id = RING_BLT, + .mmio_base = BLT_RING_BASE, + .size = 32 * PAGE_SIZE, + .init = init_ring_common, + .write_tail = ring_write_tail, + .flush = gen6_ring_flush, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, + .user_irq_get = blt_ring_get_user_irq, + .user_irq_put = blt_ring_put_user_irq, + .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, +}; + +int intel_init_render_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + dev_priv->render_ring = render_ring; + + if (!I915_NEED_GFX_HWS(dev)) { + dev_priv->render_ring.status_page.page_addr + = dev_priv->status_page_dmah->vaddr; + memset(dev_priv->render_ring.status_page.page_addr, + 0, PAGE_SIZE); + } + + return intel_init_ring_buffer(dev, &dev_priv->render_ring); +} + +int intel_init_bsd_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (IS_GEN6(dev)) + dev_priv->bsd_ring = gen6_bsd_ring; + else + dev_priv->bsd_ring = bsd_ring; + + return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); +} + +int intel_init_blt_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + dev_priv->blt_ring = gen6_blt_ring; + + return intel_init_ring_buffer(dev, &dev_priv->blt_ring); +} diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 525e7d3edda8..a05aff0e5764 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -7,25 +7,32 @@ struct intel_hw_status_page { struct drm_gem_object *obj; }; +#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base)) +#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) 
+#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base)) +#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) +#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base)) +#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) +#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base)) +#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) + struct drm_i915_gem_execbuffer2; struct intel_ring_buffer { const char *name; - struct ring_regs { - u32 ctl; - u32 head; - u32 tail; - u32 start; - } regs; - unsigned int ring_flag; + enum intel_ring_id { + RING_RENDER = 0x1, + RING_BSD = 0x2, + RING_BLT = 0x4, + } id; + u32 mmio_base; unsigned long size; - unsigned int alignment; void *virtual_start; struct drm_device *dev; struct drm_gem_object *gem_object; unsigned int head; unsigned int tail; - unsigned int space; + int space; struct intel_hw_status_page status_page; u32 irq_gem_seqno; /* last seq seem at irq time */ @@ -35,30 +42,22 @@ struct intel_ring_buffer { struct intel_ring_buffer *ring); void (*user_irq_put)(struct drm_device *dev, struct intel_ring_buffer *ring); - void (*setup_status_page)(struct drm_device *dev, - struct intel_ring_buffer *ring); int (*init)(struct drm_device *dev, struct intel_ring_buffer *ring); - unsigned int (*get_head)(struct drm_device *dev, - struct intel_ring_buffer *ring); - unsigned int (*get_tail)(struct drm_device *dev, - struct intel_ring_buffer *ring); - unsigned int (*get_active_head)(struct drm_device *dev, - struct intel_ring_buffer *ring); - void (*advance_ring)(struct drm_device *dev, - struct intel_ring_buffer *ring); + void (*write_tail)(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value); void (*flush)(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains); u32 (*add_request)(struct drm_device *dev, struct intel_ring_buffer *ring, - struct drm_file *file_priv, u32 flush_domains); - u32 (*get_gem_seqno)(struct drm_device *dev, - struct intel_ring_buffer *ring); + u32 (*get_seqno)(struct drm_device *dev, + struct intel_ring_buffer *ring); int (*dispatch_gem_execbuffer)(struct drm_device *dev, struct intel_ring_buffer *ring, struct drm_i915_gem_execbuffer2 *exec, @@ -83,6 +82,20 @@ struct intel_ring_buffer { */ struct list_head request_list; + /** + * List of objects currently pending a GPU write flush. + * + * All elements on this list will belong to either the + * active_list or flushing_list, last_rendering_seqno can + * be used to differentiate between the two elements. + */ + struct list_head gpu_write_list; + + /** + * Do we have some not yet emitted requests outstanding? 
+ */ + bool outstanding_lazy_request; + wait_queue_head_t irq_queue; drm_local_map_t map; }; @@ -96,15 +109,13 @@ intel_read_status_page(struct intel_ring_buffer *ring, } int intel_init_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); + struct intel_ring_buffer *ring); void intel_cleanup_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); + struct intel_ring_buffer *ring); int intel_wait_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring, int n); -int intel_wrap_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); + struct intel_ring_buffer *ring, int n); void intel_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, int n); + struct intel_ring_buffer *ring, int n); static inline void intel_ring_emit(struct drm_device *dev, struct intel_ring_buffer *ring, @@ -115,17 +126,19 @@ static inline void intel_ring_emit(struct drm_device *dev, ring->tail += 4; } -void intel_fill_struct(struct drm_device *dev, - struct intel_ring_buffer *ring, - void *data, - unsigned int len); void intel_ring_advance(struct drm_device *dev, struct intel_ring_buffer *ring); u32 intel_ring_get_seqno(struct drm_device *dev, struct intel_ring_buffer *ring); -extern struct intel_ring_buffer render_ring; -extern struct intel_ring_buffer bsd_ring; +int intel_init_render_ring_buffer(struct drm_device *dev); +int intel_init_bsd_ring_buffer(struct drm_device *dev); +int intel_init_blt_ring_buffer(struct drm_device *dev); + +u32 intel_ring_get_active_head(struct drm_device *dev, + struct intel_ring_buffer *ring); +void intel_ring_setup_status_page(struct drm_device *dev, + struct intel_ring_buffer *ring); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index ee73e428a84a..de158b76bcd5 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -65,8 +65,11 @@ static const char *tv_format_names[] = { struct intel_sdvo { struct intel_encoder base; + struct i2c_adapter *i2c; u8 slave_addr; + struct i2c_adapter ddc; + /* Register for the SDVO device: SDVOB or SDVOC */ int sdvo_reg; @@ -104,34 +107,24 @@ struct intel_sdvo { * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; + bool has_audio; /** - * This is set if we detect output of sdvo device as LVDS. + * This is set if we detect output of sdvo device as LVDS and + * have a valid fixed mode to use with the panel. */ bool is_lvds; - /** - * This is sdvo flags for input timing. 
- */ - uint8_t sdvo_flags; - /** * This is sdvo fixed pannel mode pointer */ struct drm_display_mode *sdvo_lvds_fixed_mode; - /* - * supported encoding mode, used to determine whether HDMI is - * supported - */ - struct intel_sdvo_encode encode; - /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; - /* Mac mini hack -- use the same DDC as the analog connector */ - struct i2c_adapter *analog_ddc_bus; - + /* Input timings for adjusted_mode */ + struct intel_sdvo_dtd input_dtd; }; struct intel_sdvo_connector { @@ -140,11 +133,15 @@ struct intel_sdvo_connector { /* Mark the type of connector */ uint16_t output_flag; + int force_audio; + /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; struct drm_property *tv_format; + struct drm_property *force_audio_property; + /* add the property for the SDVO-TV */ struct drm_property *left; struct drm_property *right; @@ -186,9 +183,15 @@ struct intel_sdvo_connector { u32 cur_dot_crawl, max_dot_crawl; }; -static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder) +static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base); + return container_of(encoder, struct intel_sdvo, base.base); +} + +static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_sdvo, base); } static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) @@ -213,7 +216,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, */ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) { - struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 bval = val, cval = val; int i; @@ -245,49 +248,29 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) { - u8 out_buf[2] = { addr, 0 }; - u8 buf[2]; struct i2c_msg msgs[] = { { - .addr = intel_sdvo->slave_addr >> 1, + .addr = intel_sdvo->slave_addr, .flags = 0, .len = 1, - .buf = out_buf, + .buf = &addr, }, { - .addr = intel_sdvo->slave_addr >> 1, + .addr = intel_sdvo->slave_addr, .flags = I2C_M_RD, .len = 1, - .buf = buf, + .buf = ch, } }; int ret; - if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2) - { - *ch = buf[0]; + if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2) return true; - } DRM_DEBUG_KMS("i2c transfer returned %d\n", ret); return false; } -static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch) -{ - u8 out_buf[2] = { addr, ch }; - struct i2c_msg msgs[] = { - { - .addr = intel_sdvo->slave_addr >> 1, - .flags = 0, - .len = 2, - .buf = out_buf, - } - }; - - return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1; -} - #define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} /** Mapping of command numbers to names, for debug output */ static const struct _sdvo_cmd_name { @@ -432,22 +415,6 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, DRM_LOG_KMS("\n"); } -static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, - const void *args, int args_len) -{ - int i; - - intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); - - for (i = 0; i < args_len; i++) { - if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i, - 
((u8*)args)[i])) - return false; - } - - return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd); -} - static const char *cmd_status_names[] = { "Power on", "Success", @@ -458,54 +425,115 @@ static const char *cmd_status_names[] = { "Scaling not supported" }; -static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo, - void *response, int response_len, - u8 status) +static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, + const void *args, int args_len) { - int i; + u8 buf[args_len*2 + 2], status; + struct i2c_msg msgs[args_len + 3]; + int i, ret; - DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); - for (i = 0; i < response_len; i++) - DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); - for (; i < 8; i++) - DRM_LOG_KMS(" "); - if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) - DRM_LOG_KMS("(%s)", cmd_status_names[status]); - else - DRM_LOG_KMS("(??? %d)", status); - DRM_LOG_KMS("\n"); + intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); + + for (i = 0; i < args_len; i++) { + msgs[i].addr = intel_sdvo->slave_addr; + msgs[i].flags = 0; + msgs[i].len = 2; + msgs[i].buf = buf + 2 *i; + buf[2*i + 0] = SDVO_I2C_ARG_0 - i; + buf[2*i + 1] = ((u8*)args)[i]; + } + msgs[i].addr = intel_sdvo->slave_addr; + msgs[i].flags = 0; + msgs[i].len = 2; + msgs[i].buf = buf + 2*i; + buf[2*i + 0] = SDVO_I2C_OPCODE; + buf[2*i + 1] = cmd; + + /* the following two are to read the response */ + status = SDVO_I2C_CMD_STATUS; + msgs[i+1].addr = intel_sdvo->slave_addr; + msgs[i+1].flags = 0; + msgs[i+1].len = 1; + msgs[i+1].buf = &status; + + msgs[i+2].addr = intel_sdvo->slave_addr; + msgs[i+2].flags = I2C_M_RD; + msgs[i+2].len = 1; + msgs[i+2].buf = &status; + + ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); + if (ret < 0) { + DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); + return false; + } + if (ret != i+3) { + /* failure in I2C transfer */ + DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); + return false; + } + + i = 3; + while (status == SDVO_CMD_STATUS_PENDING && i--) { + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_CMD_STATUS, + &status)) + return false; + } + if (status != SDVO_CMD_STATUS_SUCCESS) { + DRM_DEBUG_KMS("command returns response %s [%d]\n", + status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???", + status); + return false; + } + + return true; } static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { - int i; + u8 retry = 5; u8 status; - u8 retry = 50; + int i; - while (retry--) { - /* Read the command response */ - for (i = 0; i < response_len; i++) { - if (!intel_sdvo_read_byte(intel_sdvo, - SDVO_I2C_RETURN_0 + i, - &((u8 *)response)[i])) - return false; - } - - /* read the return status */ - if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS, + /* + * The documentation states that all commands will be + * processed within 15µs, and that we need only poll + * the status byte a maximum of 3 times in order for the + * command to be complete. + * + * Check 5 times in case the hardware failed to read the docs. + */ + do { + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_CMD_STATUS, &status)) return false; + } while (status == SDVO_CMD_STATUS_PENDING && --retry); - intel_sdvo_debug_response(intel_sdvo, response, response_len, - status); - if (status != SDVO_CMD_STATUS_PENDING) - break; + DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo)); + if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) + DRM_LOG_KMS("(%s)", cmd_status_names[status]); + else + DRM_LOG_KMS("(??? 
%d)", status); - mdelay(50); + if (status != SDVO_CMD_STATUS_SUCCESS) + goto log_fail; + + /* Read the command response */ + for (i = 0; i < response_len; i++) { + if (!intel_sdvo_read_byte(intel_sdvo, + SDVO_I2C_RETURN_0 + i, + &((u8 *)response)[i])) + goto log_fail; + DRM_LOG_KMS(" %02X", ((u8 *)response)[i]); } + DRM_LOG_KMS("\n"); + return true; - return status == SDVO_CMD_STATUS_SUCCESS; +log_fail: + DRM_LOG_KMS("\n"); + return false; } static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) @@ -518,71 +546,17 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) return 4; } -/** - * Try to read the response after issuie the DDC switch command. But it - * is noted that we must do the action of reading response and issuing DDC - * switch command in one I2C transaction. Otherwise when we try to start - * another I2C transaction after issuing the DDC bus switch, it will be - * switched to the internal SDVO register. - */ -static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, - u8 target) +static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo, + u8 ddc_bus) { - u8 out_buf[2], cmd_buf[2], ret_value[2], ret; - struct i2c_msg msgs[] = { - { - .addr = intel_sdvo->slave_addr >> 1, - .flags = 0, - .len = 2, - .buf = out_buf, - }, - /* the following two are to read the response */ - { - .addr = intel_sdvo->slave_addr >> 1, - .flags = 0, - .len = 1, - .buf = cmd_buf, - }, - { - .addr = intel_sdvo->slave_addr >> 1, - .flags = I2C_M_RD, - .len = 1, - .buf = ret_value, - }, - }; - - intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH, - &target, 1); - /* write the DDC switch command argument */ - intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target); - - out_buf[0] = SDVO_I2C_OPCODE; - out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; - cmd_buf[0] = SDVO_I2C_CMD_STATUS; - cmd_buf[1] = 0; - ret_value[0] = 0; - ret_value[1] = 0; - - ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3); - if (ret != 3) { - /* failure in I2C transfer */ - DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); - return; - } - if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) { - DRM_DEBUG_KMS("DDC switch command returns response %d\n", - ret_value[0]); - return; - } - return; + return intel_sdvo_write_cmd(intel_sdvo, + SDVO_CMD_SET_CONTROL_BUS_SWITCH, + &ddc_bus, 1); } static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len) { - if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len)) - return false; - - return intel_sdvo_read_response(intel_sdvo, NULL, 0); + return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len); } static bool @@ -819,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->flags |= DRM_MODE_FLAG_PVSYNC; } -static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo, - struct intel_sdvo_encode *encode) +static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) { - if (intel_sdvo_get_value(intel_sdvo, - SDVO_CMD_GET_SUPP_ENCODE, - encode, sizeof(*encode))) - return true; + struct intel_sdvo_encode encode; - /* non-support means DVI */ - memset(encode, 0, sizeof(*encode)); - return false; + return intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPP_ENCODE, + &encode, sizeof(encode)); } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, @@ -874,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) } #endif -static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo, - 
int index, - uint8_t *data, int8_t size, uint8_t tx_rate) -{ - uint8_t set_buf_index[2]; - - set_buf_index[0] = index; - set_buf_index[1] = 0; - - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, - set_buf_index, 2)) - return false; - - for (; size > 0; size -= 8) { - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8)) - return false; - - data += 8; - } - - return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); -} - -static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) -{ - uint8_t csum = 0; - int i; - - for (i = 0; i < size; i++) - csum += data[i]; - - return 0x100 - csum; -} - -#define DIP_TYPE_AVI 0x82 -#define DIP_VERSION_AVI 0x2 -#define DIP_LEN_AVI 13 - -struct dip_infoframe { - uint8_t type; - uint8_t version; - uint8_t len; - uint8_t checksum; - union { - struct { - /* Packet Byte #1 */ - uint8_t S:2; - uint8_t B:2; - uint8_t A:1; - uint8_t Y:2; - uint8_t rsvd1:1; - /* Packet Byte #2 */ - uint8_t R:4; - uint8_t M:2; - uint8_t C:2; - /* Packet Byte #3 */ - uint8_t SC:2; - uint8_t Q:2; - uint8_t EC:3; - uint8_t ITC:1; - /* Packet Byte #4 */ - uint8_t VIC:7; - uint8_t rsvd2:1; - /* Packet Byte #5 */ - uint8_t PR:4; - uint8_t rsvd3:4; - /* Packet Byte #6~13 */ - uint16_t top_bar_end; - uint16_t bottom_bar_start; - uint16_t left_bar_end; - uint16_t right_bar_start; - } avi; - struct { - /* Packet Byte #1 */ - uint8_t channel_count:3; - uint8_t rsvd1:1; - uint8_t coding_type:4; - /* Packet Byte #2 */ - uint8_t sample_size:2; /* SS0, SS1 */ - uint8_t sample_frequency:3; - uint8_t rsvd2:3; - /* Packet Byte #3 */ - uint8_t coding_type_private:5; - uint8_t rsvd3:3; - /* Packet Byte #4 */ - uint8_t channel_allocation; - /* Packet Byte #5 */ - uint8_t rsvd4:3; - uint8_t level_shift:4; - uint8_t downmix_inhibit:1; - } audio; - uint8_t payload[28]; - } __attribute__ ((packed)) u; -} __attribute__((packed)); - -static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, - struct drm_display_mode * mode) +static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) { struct dip_infoframe avi_if = { .type = DIP_TYPE_AVI, - .version = DIP_VERSION_AVI, + .ver = DIP_VERSION_AVI, .len = DIP_LEN_AVI, }; + uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; + uint8_t set_buf_index[2] = { 1, 0 }; + uint64_t *data = (uint64_t *)&avi_if; + unsigned i; - avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, - 4 + avi_if.len); - return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if, - 4 + avi_if.len, - SDVO_HBUF_TX_VSYNC); + intel_dip_infoframe_csum(&avi_if); + + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2)) + return false; + + for (i = 0; i < sizeof(avi_if); i += 8) { + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, + data, 8)) + return false; + data++; + } + + return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, + &tx_rate, 1); } static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) @@ -1022,8 +910,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_sdvo_dtd input_dtd; - /* Reset the input timing to the screen. Assume always input 0. 
*/ if (!intel_sdvo_set_target_input(intel_sdvo)) return false; @@ -1035,14 +921,12 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo, return false; if (!intel_sdvo_get_preferred_input_timing(intel_sdvo, - &input_dtd)) + &intel_sdvo->input_dtd)) return false; - intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); - intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags; + intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd); drm_mode_set_crtcinfo(adjusted_mode, 0); - mode->clock = adjusted_mode->clock; return true; } @@ -1050,7 +934,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); + int multiplier; /* We need to construct preferred input timings based on our * output timings. To do that, we have to set the output @@ -1065,10 +950,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, mode, adjusted_mode); } else if (intel_sdvo->is_lvds) { - drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); - if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, - intel_sdvo->sdvo_lvds_fixed_mode)) + intel_sdvo->sdvo_lvds_fixed_mode)) return false; (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo, @@ -1077,9 +960,10 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, } /* Make the CRTC code factor in the SDVO pixel multiplier. The - * SDVO device will be told of the multiplier during mode_set. + * SDVO device will factor out the multiplier during mode_set. */ - adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); + multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); + intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); return true; } @@ -1092,11 +976,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - u32 sdvox = 0; - int sdvo_pixel_multiply, rate; + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); + u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd; + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); + int rate; if (!mode) return; @@ -1114,28 +999,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, SDVO_CMD_SET_IN_OUT_MAP, &in_out, sizeof(in_out)); - if (intel_sdvo->is_hdmi) { - if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) - return; - - sdvox |= SDVO_AUDIO_ENABLE; - } + /* Set the output timings to the screen */ + if (!intel_sdvo_set_target_output(intel_sdvo, + intel_sdvo->attached_output)) + return; /* We have tried to get input timing in mode_fixup, and filled into - adjusted_mode */ - intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - if (intel_sdvo->is_tv || intel_sdvo->is_lvds) - input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; - - /* If it's a TV, we already set the output timing in mode_fixup. - * Otherwise, the output timing is equal to the input timing. + * adjusted_mode. 
*/ - if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) { + if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { + input_dtd = intel_sdvo->input_dtd; + } else { /* Set the output timing to the screen */ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) return; + intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); } @@ -1143,31 +1023,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (!intel_sdvo_set_target_input(intel_sdvo)) return; - if (intel_sdvo->is_tv) { - if (!intel_sdvo_set_tv_format(intel_sdvo)) - return; - } + if (intel_sdvo->is_hdmi && + !intel_sdvo_set_avi_infoframe(intel_sdvo)) + return; - /* We would like to use intel_sdvo_create_preferred_input_timing() to - * provide the device with a timing it can support, if it supports that - * feature. However, presumably we would need to adjust the CRTC to - * output the preferred timing, and we don't support that currently. - */ -#if 0 - success = intel_sdvo_create_preferred_input_timing(encoder, clock, - width, height); - if (success) { - struct intel_sdvo_dtd *input_dtd; + if (intel_sdvo->is_tv && + !intel_sdvo_set_tv_format(intel_sdvo)) + return; - intel_sdvo_get_preferred_input_timing(encoder, &input_dtd); - intel_sdvo_set_input_timing(encoder, &input_dtd); - } -#else (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); -#endif - sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); - switch (sdvo_pixel_multiply) { + switch (pixel_multiplier) { + default: case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; @@ -1176,14 +1043,14 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, return; /* Set the SDVO control regs. 
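
(Illustrative aside, not part of the patch.) On pre-gen4 hardware the control-register programming that follows is a read-modify-write: the current SDVOB/SDVOC value is read back and masked with the corresponding *_PRESERVE_MASK before the newly computed bits are ORed in. The generic shape, as a sketch:

#include <stdint.h>

/* Bits named by preserve_mask keep whatever the BIOS/hardware left in
 * them; everything else comes from new_bits. Caller guarantees that
 * new_bits does not overlap the preserved field. */
static uint32_t rmw_preserve(uint32_t readback, uint32_t preserve_mask,
			     uint32_t new_bits)
{
	return (readback & preserve_mask) | new_bits;
}

/* usage shape below: sdvox = rmw_preserve(read(sdvo_reg),
 *                                         SDVOB_PRESERVE_MASK, flags); */
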
*/ - if (IS_I965G(dev)) { - sdvox |= SDVO_BORDER_ENABLE; + if (INTEL_INFO(dev)->gen >= 4) { + sdvox = SDVO_BORDER_ENABLE; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) sdvox |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) sdvox |= SDVO_HSYNC_ACTIVE_HIGH; } else { - sdvox |= I915_READ(intel_sdvo->sdvo_reg); + sdvox = I915_READ(intel_sdvo->sdvo_reg); switch (intel_sdvo->sdvo_reg) { case SDVOB: sdvox &= SDVOB_PRESERVE_MASK; @@ -1196,16 +1063,18 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, } if (intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; + if (intel_sdvo->has_audio) + sdvox |= SDVO_AUDIO_ENABLE; - if (IS_I965G(dev)) { + if (INTEL_INFO(dev)->gen >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { /* done in crtc_mode_set as it lives inside the dpll register */ } else { - sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; + sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } - if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL) + if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } @@ -1214,7 +1083,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); u32 temp; @@ -1260,8 +1129,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) static int intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -1285,7 +1153,38 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps) { - return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps)); + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_DEVICE_CAPS, + caps, sizeof(*caps))) + return false; + + DRM_DEBUG_KMS("SDVO capabilities:\n" + " vendor_id: %d\n" + " device_id: %d\n" + " device_rev_id: %d\n" + " sdvo_version_major: %d\n" + " sdvo_version_minor: %d\n" + " sdvo_inputs_mask: %d\n" + " smooth_scaling: %d\n" + " sharp_scaling: %d\n" + " up_scaling: %d\n" + " down_scaling: %d\n" + " stall_support: %d\n" + " output_flags: %d\n", + caps->vendor_id, + caps->device_id, + caps->device_rev_id, + caps->sdvo_version_major, + caps->sdvo_version_minor, + caps->sdvo_inputs_mask, + caps->smooth_scaling, + caps->sharp_scaling, + caps->up_scaling, + caps->down_scaling, + caps->stall_support, + caps->output_flags); + + return true; } /* No use! 
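
(Illustrative aside, not part of the patch.) The capability dump just added works because intel_sdvo_get_value() reads the device's reply bytes straight into a packed struct whose layout matches the wire format, so the fields can then be printed directly. A cut-down sketch of the pattern — the field set here is a subset, not the real intel_sdvo_caps definition:

#include <stdint.h>
#include <stdio.h>

/* Declared with the wire layout so a raw read fills it directly. */
struct sdvo_caps_sketch {
	uint8_t  vendor_id;
	uint8_t  device_id;
	uint8_t  device_rev_id;
	uint8_t  sdvo_version_major;
	uint8_t  sdvo_version_minor;
	uint16_t output_flags;
} __attribute__((packed));

static void dump_caps(const struct sdvo_caps_sketch *caps)
{
	printf("vendor %u device %u rev %u, SDVO %u.%u, outputs 0x%04x\n",
	       caps->vendor_id, caps->device_id, caps->device_rev_id,
	       caps->sdvo_version_major, caps->sdvo_version_minor,
	       caps->output_flags);
}
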
*/ @@ -1389,22 +1288,33 @@ intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) return (caps > 1); } +static struct edid * +intel_sdvo_get_edid(struct drm_connector *connector) +{ + struct intel_sdvo *sdvo = intel_attached_sdvo(connector); + return drm_get_edid(connector, &sdvo->ddc); +} + static struct drm_connector * intel_find_analog_connector(struct drm_device *dev) { struct drm_connector *connector; - struct drm_encoder *encoder; - struct intel_sdvo *intel_sdvo; + struct intel_sdvo *encoder; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - intel_sdvo = enc_to_intel_sdvo(encoder); - if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) { - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (encoder == intel_attached_encoder(connector)) + list_for_each_entry(encoder, + &dev->mode_config.encoder_list, + base.base.head) { + if (encoder->base.type == INTEL_OUTPUT_ANALOG) { + list_for_each_entry(connector, + &dev->mode_config.connector_list, + head) { + if (&encoder->base == + intel_attached_encoder(connector)) return connector; } } } + return NULL; } @@ -1424,64 +1334,72 @@ intel_analog_is_connected(struct drm_device *dev) return true; } +/* Mac mini hack -- use the same DDC as the analog connector */ +static struct edid * +intel_sdvo_get_analog_edid(struct drm_connector *connector) +{ + struct drm_i915_private *dev_priv = connector->dev->dev_private; + + if (!intel_analog_is_connected(connector->dev)) + return NULL; + + return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); +} + enum drm_connector_status intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); - enum drm_connector_status status = connector_status_connected; - struct edid *edid = NULL; + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + enum drm_connector_status status; + struct edid *edid; - edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); + edid = intel_sdvo_get_edid(connector); - /* This is only applied to SDVO cards with multiple outputs */ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) { - uint8_t saved_ddc, temp_ddc; - saved_ddc = intel_sdvo->ddc_bus; - temp_ddc = intel_sdvo->ddc_bus >> 1; + u8 ddc, saved_ddc = intel_sdvo->ddc_bus; + /* * Don't use the 1 as the argument of DDC bus switch to get * the EDID. It is used for SDVO SPD ROM. */ - while(temp_ddc > 1) { - intel_sdvo->ddc_bus = temp_ddc; - edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus); - if (edid) { - /* - * When we can get the EDID, maybe it is the - * correct DDC bus. Update it. - */ - intel_sdvo->ddc_bus = temp_ddc; + for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) { + intel_sdvo->ddc_bus = ddc; + edid = intel_sdvo_get_edid(connector); + if (edid) break; - } - temp_ddc >>= 1; } + /* + * If we found the EDID on the other bus, + * assume that is the correct DDC bus. + */ if (edid == NULL) intel_sdvo->ddc_bus = saved_ddc; } - /* when there is no edid and no monitor is connected with VGA - * port, try to use the CRT ddc to read the EDID for DVI-connector + + /* + * When there is no edid and no monitor is connected with VGA + * port, try to use the CRT ddc to read the EDID for DVI-connector. 
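
(Illustrative aside, not part of the patch.) The rewritten detect path above walks candidate DDC bus bits downwards from the configured one, skipping bit 0 because it selects the SDVO SPD ROM rather than a monitor, and keeps the first bus that yields an EDID. The same loop shape in isolation; probe_edid() is a hypothetical callback standing in for the real EDID fetch:

#include <stdbool.h>

static unsigned find_ddc_bus(unsigned configured,
			     bool (*probe_edid)(unsigned bus))
{
	unsigned ddc;

	for (ddc = configured >> 1; ddc > 1; ddc >>= 1)
		if (probe_edid(ddc))
			return ddc;	/* EDID found: assume the right bus */
	return configured;		/* nothing better: keep the old bus */
}
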
*/ - if (edid == NULL && intel_sdvo->analog_ddc_bus && - !intel_analog_is_connected(connector->dev)) - edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus); + if (edid == NULL) + edid = intel_sdvo_get_analog_edid(connector); + status = connector_status_unknown; if (edid != NULL) { - bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); - bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK); - /* DDC bus is shared, match EDID to connector type */ - if (is_digital && need_digital) + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + status = connector_status_connected; intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); - else if (is_digital != need_digital) - status = connector_status_disconnected; - + intel_sdvo->has_audio = drm_detect_monitor_audio(edid); + } connector->display_info.raw_edid = NULL; - } else - status = connector_status_disconnected; - - kfree(edid); + kfree(edid); + } + + if (status == connector_status_connected) { + struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); + if (intel_sdvo_connector->force_audio) + intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; + } return status; } @@ -1490,13 +1408,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector, bool force) { uint16_t response; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; if (!intel_sdvo_write_cmd(intel_sdvo, - SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) + SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) return connector_status_unknown; if (intel_sdvo->is_tv) { /* add 30ms delay when the output type is SDVO-TV */ @@ -1505,7 +1422,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) return connector_status_unknown; - DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); + DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", + response & 0xff, response >> 8, + intel_sdvo_connector->output_flag); if (response == 0) return connector_status_disconnected; @@ -1538,12 +1457,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - int num_modes; + struct edid *edid; /* set the bus switch and get the modes */ - num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); + edid = intel_sdvo_get_edid(connector); /* * Mac mini hack. On this device, the DVI-I connector shares one DDC @@ -1551,12 +1468,14 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. 
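
(Illustrative aside, not part of the patch.) The force_audio property introduced by this patch is a tri-state override: -1 forces audio off, 0 follows what the EDID reported, 1 forces audio on, and the set_property hunk only triggers a full mode-set when the effective audio state actually changes. A sketch mirroring that early-return logic:

#include <stdbool.h>

struct audio_state {
	int  force;	/* -1 force off, 0 follow EDID, 1 force on */
	bool enabled;	/* what is currently programmed */
};

/* Returns true when the caller must do a full mode-set. */
static bool apply_force_audio(struct audio_state *s, int val)
{
	if (val == s->force)
		return false;		/* nothing changed */
	s->force = val;

	if (val > 0 && s->enabled)
		return false;		/* already on */
	if (val < 0 && !s->enabled)
		return false;		/* already off */

	s->enabled = val > 0;		/* as in the patch, 0 lands on "off" here */
	return true;
}
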
*/ - if (num_modes == 0 && - intel_sdvo->analog_ddc_bus && - !intel_analog_is_connected(connector->dev)) { - /* Switch to the analog ddc bus and try that - */ - (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus); + if (edid == NULL) + edid = intel_sdvo_get_analog_edid(connector); + + if (edid != NULL) { + drm_mode_connector_update_edid_property(connector, edid); + drm_add_edid_modes(connector, edid); + connector->display_info.raw_edid = NULL; + kfree(edid); } } @@ -1627,8 +1546,7 @@ struct drm_display_mode sdvo_tv_modes[] = { static void intel_sdvo_get_tv_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_sdtv_resolution_request tv_res; uint32_t reply = 0, format_map = 0; int i; @@ -1644,7 +1562,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) return; BUILD_BUG_ON(sizeof(tv_res) != 3); - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, + if (!intel_sdvo_write_cmd(intel_sdvo, + SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, &tv_res, sizeof(tv_res))) return; if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) @@ -1662,8 +1581,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_display_mode *newmode; @@ -1672,7 +1590,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. 
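
(Illustrative aside, not part of the patch.) The LVDS path just after this point latches the first probed mode flagged DRM_MODE_TYPE_PREFERRED as the fixed panel mode, duplicating it with drm_mode_duplicate(). The selection in isolation, with a minimal stand-in mode type rather than drm_display_mode:

#include <stddef.h>

#define MODE_PREFERRED 0x1	/* stands in for DRM_MODE_TYPE_PREFERRED */

struct mode_sketch {
	unsigned type;
	struct mode_sketch *next;
};

/* First PREFERRED entry wins; the probed list is assumed to be in
 * priority order, as the comment above notes. The caller duplicates
 * the result before the probed list is freed. */
static struct mode_sketch *pick_fixed_mode(struct mode_sketch *probed)
{
	struct mode_sketch *m;

	for (m = probed; m; m = m->next)
		if (m->type & MODE_PREFERRED)
			return m;
	return NULL;
}
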
*/ - intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus); + intel_ddc_get_modes(connector, intel_sdvo->i2c); if (list_empty(&connector->probed_modes) == false) goto end; @@ -1693,6 +1611,10 @@ end: if (newmode->type & DRM_MODE_TYPE_PREFERRED) { intel_sdvo->sdvo_lvds_fixed_mode = drm_mode_duplicate(connector->dev, newmode); + + drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, + 0); + intel_sdvo->is_lvds = true; break; } @@ -1775,8 +1697,7 @@ intel_sdvo_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); uint16_t temp_value; uint8_t cmd; @@ -1786,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector, if (ret) return ret; + if (property == intel_sdvo_connector->force_audio_property) { + if (val == intel_sdvo_connector->force_audio) + return 0; + + intel_sdvo_connector->force_audio = val; + + if (val > 0 && intel_sdvo->has_audio) + return 0; + if (val < 0 && !intel_sdvo->has_audio) + return 0; + + intel_sdvo->has_audio = val > 0; + goto done; + } + #define CHECK_PROPERTY(name, NAME) \ if (intel_sdvo_connector->name == property) { \ if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ @@ -1879,9 +1815,8 @@ set_value: done: - if (encoder->crtc) { - struct drm_crtc *crtc = encoder->crtc; - + if (intel_sdvo->base.base.crtc) { + struct drm_crtc *crtc = intel_sdvo->base.base.crtc; drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); } @@ -1909,20 +1844,18 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - - if (intel_sdvo->analog_ddc_bus) - intel_i2c_destroy(intel_sdvo->analog_ddc_bus); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) drm_mode_destroy(encoder->dev, intel_sdvo->sdvo_lvds_fixed_mode); + i2c_del_adapter(&intel_sdvo->ddc); intel_encoder_destroy(encoder); } @@ -1990,54 +1923,49 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, intel_sdvo_guess_ddc_bus(sdvo); } -static bool -intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device) +static void +intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, + struct intel_sdvo *sdvo, u32 reg) { - return intel_sdvo_set_target_output(intel_sdvo, - device == 0 ? 
SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) && - intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, - &intel_sdvo->is_hdmi, 1); -} + struct sdvo_device_mapping *mapping; + u8 pin, speed; -static struct intel_sdvo * -intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan) -{ - struct drm_device *dev = chan->drm_dev; - struct drm_encoder *encoder; + if (IS_SDVOB(reg)) + mapping = &dev_priv->sdvo_mappings[0]; + else + mapping = &dev_priv->sdvo_mappings[1]; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder); - if (intel_sdvo->base.ddc_bus == &chan->adapter) - return intel_sdvo; + pin = GMBUS_PORT_DPB; + speed = GMBUS_RATE_1MHZ >> 8; + if (mapping->initialized) { + pin = mapping->i2c_pin; + speed = mapping->i2c_speed; } - return NULL; + sdvo->i2c = &dev_priv->gmbus[pin].adapter; + intel_gmbus_set_speed(sdvo->i2c, speed); + intel_gmbus_force_bit(sdvo->i2c, true); } -static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, - struct i2c_msg msgs[], int num) +static bool +intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device) { - struct intel_sdvo *intel_sdvo; - struct i2c_algo_bit_data *algo_data; - const struct i2c_algorithm *algo; + int is_hdmi; - algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; - intel_sdvo = - intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *) - (algo_data->data)); - if (intel_sdvo == NULL) - return -EINVAL; + if (!intel_sdvo_check_supp_encode(intel_sdvo)) + return false; - algo = intel_sdvo->base.i2c_bus->algo; + if (!intel_sdvo_set_target_output(intel_sdvo, + device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1)) + return false; - intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus); - return algo->master_xfer(i2c_adap, msgs, num); + is_hdmi = 0; + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1)) + return false; + + return !!is_hdmi; } -static struct i2c_algorithm intel_sdvo_i2c_bit_algo = { - .master_xfer = intel_sdvo_master_xfer, -}; - static u8 intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) { @@ -2076,26 +2004,44 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg) } static void -intel_sdvo_connector_init(struct drm_encoder *encoder, - struct drm_connector *connector) +intel_sdvo_connector_init(struct intel_sdvo_connector *connector, + struct intel_sdvo *encoder) { - drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, - connector->connector_type); + drm_connector_init(encoder->base.base.dev, + &connector->base.base, + &intel_sdvo_connector_funcs, + connector->base.base.connector_type); - drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); + drm_connector_helper_add(&connector->base.base, + &intel_sdvo_connector_helper_funcs); - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->base.base.interlace_allowed = 0; + connector->base.base.doublescan_allowed = 0; + connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; - drm_mode_connector_attach_encoder(connector, encoder); - drm_sysfs_connector_add(connector); + intel_connector_attach_encoder(&connector->base, &encoder->base); + drm_sysfs_connector_add(&connector->base.base); +} + +static void +intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) +{ + struct drm_device *dev = connector->base.base.dev; + + connector->force_audio_property = + drm_property_create(dev, 
DRM_MODE_PROP_RANGE, "force_audio", 2); + if (connector->force_audio_property) { + connector->force_audio_property->values[0] = -1; + connector->force_audio_property->values[1] = 1; + drm_connector_attach_property(&connector->base.base, + connector->force_audio_property, 0); + } } static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { - struct drm_encoder *encoder = &intel_sdvo->base.enc; + struct drm_encoder *encoder = &intel_sdvo->base.base; struct drm_connector *connector; struct intel_connector *intel_connector; struct intel_sdvo_connector *intel_sdvo_connector; @@ -2118,19 +2064,20 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode) - && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device) - && intel_sdvo->is_hdmi) { + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { /* enable hdmi encoding mode if supported */ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; + intel_sdvo->is_hdmi = true; } intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT)); - intel_sdvo_connector_init(encoder, connector); + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + + intel_sdvo_add_hdmi_properties(intel_sdvo_connector); return true; } @@ -2138,36 +2085,36 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) static bool intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) { - struct drm_encoder *encoder = &intel_sdvo->base.enc; - struct drm_connector *connector; - struct intel_connector *intel_connector; - struct intel_sdvo_connector *intel_sdvo_connector; + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); if (!intel_sdvo_connector) return false; intel_connector = &intel_sdvo_connector->base; - connector = &intel_connector->base; - encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; - connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; + connector = &intel_connector->base; + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - intel_sdvo->controlled_output |= type; - intel_sdvo_connector->output_flag = type; + intel_sdvo->controlled_output |= type; + intel_sdvo_connector->output_flag = type; - intel_sdvo->is_tv = true; - intel_sdvo->base.needs_tv_clock = true; - intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; + intel_sdvo->is_tv = true; + intel_sdvo->base.needs_tv_clock = true; + intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; - intel_sdvo_connector_init(encoder, connector); + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); - if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) + if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type)) goto err; - if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; - return true; + return true; err: intel_sdvo_destroy(connector); @@ -2177,43 +2124,10 @@ err: static bool intel_sdvo_analog_init(struct intel_sdvo 
*intel_sdvo, int device) { - struct drm_encoder *encoder = &intel_sdvo->base.enc; - struct drm_connector *connector; - struct intel_connector *intel_connector; - struct intel_sdvo_connector *intel_sdvo_connector; - - intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); - if (!intel_sdvo_connector) - return false; - - intel_connector = &intel_sdvo_connector->base; - connector = &intel_connector->base; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; - encoder->encoder_type = DRM_MODE_ENCODER_DAC; - connector->connector_type = DRM_MODE_CONNECTOR_VGA; - - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; - intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; - intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; - } - - intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT)); - - intel_sdvo_connector_init(encoder, connector); - return true; -} - -static bool -intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) -{ - struct drm_encoder *encoder = &intel_sdvo->base.enc; - struct drm_connector *connector; - struct intel_connector *intel_connector; - struct intel_sdvo_connector *intel_sdvo_connector; + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); if (!intel_sdvo_connector) @@ -2221,22 +2135,56 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; - encoder->encoder_type = DRM_MODE_ENCODER_LVDS; - connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; - intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; - intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; - } + if (device == 0) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; + } else if (device == 1) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; + } - intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | + intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT)); + + intel_sdvo_connector_init(intel_sdvo_connector, + intel_sdvo); + return true; +} + +static bool +intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) +{ + struct drm_encoder *encoder = &intel_sdvo->base.base; + struct drm_connector *connector; + struct intel_connector *intel_connector; + struct intel_sdvo_connector *intel_sdvo_connector; + + intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); + if (!intel_sdvo_connector) + return false; + + intel_connector = &intel_sdvo_connector->base; + connector = &intel_connector->base; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + + if (device == 0) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; + } 
else if (device == 1) { + intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; + intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; + } + + intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | (1 << INTEL_SDVO_LVDS_CLONE_BIT)); - intel_sdvo_connector_init(encoder, connector); - if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) + intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); + if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; return true; @@ -2307,7 +2255,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type) { - struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_device *dev = intel_sdvo->base.base.dev; struct intel_sdvo_tv_format format; uint32_t format_map, i; @@ -2373,7 +2321,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { - struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; @@ -2502,7 +2450,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, struct intel_sdvo_enhancements_reply enhancements) { - struct drm_device *dev = intel_sdvo->base.enc.dev; + struct drm_device *dev = intel_sdvo->base.base.dev; struct drm_connector *connector = &intel_sdvo_connector->base.base; uint16_t response, data_value[2]; @@ -2535,7 +2483,43 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply); else return true; +} +static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct intel_sdvo *sdvo = adapter->algo_data; + + if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus)) + return -EIO; + + return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num); +} + +static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter) +{ + struct intel_sdvo *sdvo = adapter->algo_data; + return sdvo->i2c->algo->functionality(sdvo->i2c); +} + +static const struct i2c_algorithm intel_sdvo_ddc_proxy = { + .master_xfer = intel_sdvo_ddc_proxy_xfer, + .functionality = intel_sdvo_ddc_proxy_func +}; + +static bool +intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, + struct drm_device *dev) +{ + sdvo->ddc.owner = THIS_MODULE; + sdvo->ddc.class = I2C_CLASS_DDC; + snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy"); + sdvo->ddc.dev.parent = &dev->pdev->dev; + sdvo->ddc.algo_data = sdvo; + sdvo->ddc.algo = &intel_sdvo_ddc_proxy; + + return i2c_add_adapter(&sdvo->ddc) == 0; } bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) @@ -2543,95 +2527,66 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; - u8 ch[0x40]; int i; - u32 i2c_reg, ddc_reg, analog_ddc_reg; intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); if (!intel_sdvo) return false; + if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { + kfree(intel_sdvo); + return false; + } + intel_sdvo->sdvo_reg = sdvo_reg; intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; + /* encoder type will be 
decided later */ + drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0); - if (HAS_PCH_SPLIT(dev)) { - i2c_reg = PCH_GPIOE; - ddc_reg = PCH_GPIOE; - analog_ddc_reg = PCH_GPIOA; - } else { - i2c_reg = GPIOE; - ddc_reg = GPIOE; - analog_ddc_reg = GPIOA; - } - - /* setup the DDC bus. */ - if (IS_SDVOB(sdvo_reg)) - intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB"); - else - intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC"); - - if (!intel_encoder->i2c_bus) - goto err_inteloutput; - - intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); - - /* Save the bit-banging i2c functionality for use by the DDC wrapper */ - intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; + intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1; + intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { - if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) { + u8 byte; + + if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 'B' : 'C'); - goto err_i2c; + goto err; } } - /* setup the DDC bus. */ - if (IS_SDVOB(sdvo_reg)) { - intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); - intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, - "SDVOB/VGA DDC BUS"); + if (IS_SDVOB(sdvo_reg)) dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; - } else { - intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); - intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, - "SDVOC/VGA DDC BUS"); + else dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; - } - if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL) - goto err_i2c; - /* Wrap with our custom algo which switches to DDC mode */ - intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; - - /* encoder type will be decided later */ - drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0); - drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) - goto err_enc; + goto err; if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", IS_SDVOB(sdvo_reg) ? 'B' : 'C'); - goto err_enc; + goto err; } intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) - goto err_enc; + goto err; if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, &intel_sdvo->pixel_clock_min, &intel_sdvo->pixel_clock_max)) - goto err_enc; + goto err; DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " @@ -2651,16 +2606,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 
'Y' : 'N'); return true; -err_enc: - drm_encoder_cleanup(&intel_encoder->enc); -err_i2c: - if (intel_sdvo->analog_ddc_bus != NULL) - intel_i2c_destroy(intel_sdvo->analog_ddc_bus); - if (intel_encoder->ddc_bus != NULL) - intel_i2c_destroy(intel_encoder->ddc_bus); - if (intel_encoder->i2c_bus != NULL) - intel_i2c_destroy(intel_encoder->i2c_bus); -err_inteloutput: +err: + drm_encoder_cleanup(&intel_encoder->base); + i2c_del_adapter(&intel_sdvo->ddc); kfree(intel_sdvo); return false; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 4a117e318a73..2f7681989316 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -48,7 +48,7 @@ struct intel_tv { struct intel_encoder base; int type; - char *tv_format; + const char *tv_format; int margin[4]; u32 save_TV_H_CTL_1; u32 save_TV_H_CTL_2; @@ -350,7 +350,7 @@ static const struct video_levels component_levels = { struct tv_mode { - char *name; + const char *name; int clock; int refresh; /* in millihertz (for precision) */ u32 oversample; @@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = { static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) { - return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base); + return container_of(encoder, struct intel_tv, base.base); +} + +static struct intel_tv *intel_attached_tv(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_tv, + base); } static void @@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode) } static const struct tv_mode * -intel_tv_mode_lookup (char *tv_format) +intel_tv_mode_lookup(const char *tv_format) { int i; @@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format) } static const struct tv_mode * -intel_tv_mode_find (struct intel_tv *intel_tv) +intel_tv_mode_find(struct intel_tv *intel_tv) { return intel_tv_mode_lookup(intel_tv->tv_format); } static enum drm_mode_status -intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) +intel_tv_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); /* Ensure TV refresh is close to desired refresh */ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) < 1000) return MODE_OK; + return MODE_CLOCK_RANGE; } @@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, color_conversion->av); } - if (IS_I965G(dev)) + if (INTEL_INFO(dev)->gen >= 4) I915_WRITE(TV_CLR_KNOBS, 0x00404000); else I915_WRITE(TV_CLR_KNOBS, 0x00606000); @@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); /* Wait for vblank for the disable to take effect */ - if (!IS_I9XX(dev)) + if (IS_GEN2(dev)) intel_wait_for_vblank(dev, intel_crtc->pipe); - I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE); + I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE); /* Wait for vblank for the disable to take effect. 
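
(Illustrative aside, not part of the patch.) The TV load-detect changes below replace fixed msleep()s with wait_for(), i.e. re-evaluate a condition until it holds or a deadline passes. A userspace sketch of that pattern; now_ms() is defined here only for the example, and the post-deadline re-check is a defensive choice rather than a claim about the kernel macro:

#include <stdbool.h>
#include <time.h>

static unsigned long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/* 0 on success, -1 on timeout (~ -ETIMEDOUT in the kernel). */
static int wait_for_cond(bool (*cond)(void *ctx), void *ctx,
			 unsigned timeout_ms)
{
	unsigned long deadline = now_ms() + timeout_ms;

	while (!cond(ctx)) {
		if (now_ms() > deadline)
			/* re-check once so a last-moment success isn't lost */
			return cond(ctx) ? 0 : -1;
		/* a real implementation would cpu_relax()/usleep here */
	}
	return 0;
}
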
*/ - intel_wait_for_vblank(dev, intel_crtc->pipe); + intel_wait_for_pipe_off(dev, intel_crtc->pipe); /* Filter ctl must be set before TV_WIN_SIZE */ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE); @@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); - I915_WRITE(TV_DAC, 0); + I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE); I915_WRITE(TV_CTL, tv_ctl); } @@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = { static int intel_tv_detect_type (struct intel_tv *intel_tv) { - struct drm_encoder *encoder = &intel_tv->base.enc; + struct drm_encoder *encoder = &intel_tv->base.base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; u32 tv_ctl, save_tv_ctl; u32 tv_dac, save_tv_dac; - int type = DRM_MODE_CONNECTOR_Unknown; - - tv_dac = I915_READ(TV_DAC); + int type; /* Disable TV interrupts around load detect or we'll recurse */ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); @@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv) PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); - /* - * Detect TV by polling) - */ - save_tv_dac = tv_dac; - tv_ctl = I915_READ(TV_CTL); - save_tv_ctl = tv_ctl; - tv_ctl &= ~TV_ENC_ENABLE; - tv_ctl &= ~TV_TEST_MODE_MASK; + save_tv_dac = tv_dac = I915_READ(TV_DAC); + save_tv_ctl = tv_ctl = I915_READ(TV_CTL); + + /* Poll for TV detection */ + tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; - tv_dac &= ~TVDAC_SENSE_MASK; - tv_dac &= ~DAC_A_MASK; - tv_dac &= ~DAC_B_MASK; - tv_dac &= ~DAC_C_MASK; + + tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); tv_dac |= (TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | @@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv) DAC_A_0_7_V | DAC_B_0_7_V | DAC_C_0_7_V); + I915_WRITE(TV_CTL, tv_ctl); I915_WRITE(TV_DAC, tv_dac); POSTING_READ(TV_DAC); - msleep(20); - tv_dac = I915_READ(TV_DAC); - I915_WRITE(TV_DAC, save_tv_dac); - I915_WRITE(TV_CTL, save_tv_ctl); - POSTING_READ(TV_CTL); - msleep(20); + intel_wait_for_vblank(intel_tv->base.base.dev, + to_intel_crtc(intel_tv->base.base.crtc)->pipe); - /* - * A B C - * 0 1 1 Composite - * 1 0 X svideo - * 0 0 0 Component - */ - if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { - DRM_DEBUG_KMS("Detected Composite TV connection\n"); - type = DRM_MODE_CONNECTOR_Composite; - } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { - DRM_DEBUG_KMS("Detected S-Video TV connection\n"); - type = DRM_MODE_CONNECTOR_SVIDEO; - } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { - DRM_DEBUG_KMS("Detected Component TV connection\n"); - type = DRM_MODE_CONNECTOR_Component; - } else { - DRM_DEBUG_KMS("No TV connection detected\n"); - type = -1; + type = -1; + if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) { + DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); + /* + * A B C + * 0 1 1 Composite + * 1 0 X svideo + * 0 0 0 Component + */ + if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { + DRM_DEBUG_KMS("Detected Composite TV connection\n"); + type = DRM_MODE_CONNECTOR_Composite; + } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { + DRM_DEBUG_KMS("Detected S-Video TV 
connection\n"); + type = DRM_MODE_CONNECTOR_SVIDEO; + } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { + DRM_DEBUG_KMS("Detected Component TV connection\n"); + type = DRM_MODE_CONNECTOR_Component; + } else { + DRM_DEBUG_KMS("Unrecognised TV connection\n"); + } } + I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); + I915_WRITE(TV_CTL, save_tv_ctl); + /* Restore interrupt config */ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | @@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv) */ static void intel_tv_find_better_format(struct drm_connector *connector) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); int i; @@ -1344,14 +1347,13 @@ static enum drm_connector_status intel_tv_detect(struct drm_connector *connector, bool force) { struct drm_display_mode mode; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); int type; mode = reported_modes[0]; drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); - if (encoder->crtc && encoder->crtc->enabled) { + if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { type = intel_tv_detect_type(intel_tv); } else if (force) { struct drm_crtc *crtc; @@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force) return connector_status_connected; } -static struct input_res { - char *name; +static const struct input_res { + const char *name; int w, h; -} input_res_table[] = -{ +} input_res_table[] = { {"640x480", 640, 480}, {"800x600", 800, 600}, {"1024x768", 1024, 768}, @@ -1396,8 +1397,7 @@ static void intel_tv_chose_preferred_modes(struct drm_connector *connector, struct drm_display_mode *mode_ptr) { - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) @@ -1422,15 +1422,14 @@ static int intel_tv_get_modes(struct drm_connector *connector) { struct drm_display_mode *mode_ptr; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); int j, count = 0; u64 tmp; for (j = 0; j < ARRAY_SIZE(input_res_table); j++) { - struct input_res *input = &input_res_table[j]; + const struct input_res *input = &input_res_table[j]; unsigned int hactive_s = input->w; unsigned int vactive_s = input->h; @@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop uint64_t val) { struct drm_device *dev = connector->dev; - struct drm_encoder *encoder = intel_attached_encoder(connector); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); - struct drm_crtc *crtc = encoder->crtc; + struct intel_tv *intel_tv = intel_attached_tv(connector); + struct drm_crtc *crtc = intel_tv->base.base.crtc; int ret = 0; bool changed = false; @@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { static const struct drm_connector_helper_funcs 
intel_tv_connector_helper_funcs = { .mode_valid = intel_tv_mode_valid, .get_modes = intel_tv_get_modes, - .best_encoder = intel_attached_encoder, + .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_tv_enc_funcs = { @@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev) struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; u32 tv_dac_on, tv_dac_off, save_tv_dac; - char **tv_format_names; + char *tv_format_names[ARRAY_SIZE(tv_modes)]; int i, initial_mode = 0; if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) @@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev) drm_connector_init(dev, connector, &intel_tv_connector_funcs, DRM_MODE_CONNECTOR_SVIDEO); - drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs, + drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC); - drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc); + intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); - intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1)); - intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); + intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); + intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); intel_tv->type = DRM_MODE_CONNECTOR_Unknown; /* BIOS margin values */ @@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev) intel_tv->margin[TV_MARGIN_RIGHT] = 46; intel_tv->margin[TV_MARGIN_BOTTOM] = 37; - intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); + intel_tv->tv_format = tv_modes[initial_mode].name; - drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs); + drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); connector->interlace_allowed = false; connector->doublescan_allowed = false; /* Create TV properties then attach current values */ - tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes), - GFP_KERNEL); - if (!tv_format_names) - goto out; for (i = 0; i < ARRAY_SIZE(tv_modes); i++) - tv_format_names[i] = tv_modes[i].name; - drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names); + tv_format_names[i] = (char *)tv_modes[i].name; + drm_mode_create_tv_properties(dev, + ARRAY_SIZE(tv_modes), + tv_format_names); drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, initial_mode); @@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev) drm_connector_attach_property(connector, dev->mode_config.tv_bottom_margin_property, intel_tv->margin[TV_MARGIN_BOTTOM]); -out: drm_sysfs_connector_add(connector); } diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index ac64f0b0392e..0aaf5f67a436 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -60,8 +60,6 @@ static struct drm_driver driver = { .irq_uninstall = mga_driver_irq_uninstall, .irq_handler = mga_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = mga_ioctls, .dma_ioctl = mga_dma_buffers, .fops = { diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index d2d28048efb2..72730e9ca06c 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ 
b/drivers/gpu/drm/nouveau/Kconfig @@ -10,6 +10,7 @@ config DRM_NOUVEAU select FB select FRAMEBUFFER_CONSOLE if !EMBEDDED select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT + select ACPI_VIDEO if ACPI help Choose this option for open-source nVidia support. diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index e9b06e4ef2a2..23fa82d667d6 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -9,7 +9,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ - nouveau_dp.o \ + nouveau_dp.o nouveau_ramht.o \ + nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ @@ -23,7 +24,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ nv10_gpio.o nv50_gpio.o \ - nv50_calc.o + nv50_calc.o \ + nv04_pm.o nv50_pm.o nva3_pm.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index c17a055ee3e5..119152606e4c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -292,6 +292,6 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) if (ret < 0) return ret; - nv_connector->edid = edid; + nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 8fa339600fe3..5f21030a293b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -43,9 +43,6 @@ #define BIOSLOG(sip, fmt, arg...) 
NV_DEBUG(sip->dev, fmt, ##arg) #define LOG_OLD_VALUE(x) -#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x)) -#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x)) - struct init_exec { bool execute; bool repeat; @@ -272,12 +269,6 @@ struct init_tbl_entry { int (*handler)(struct nvbios *, uint16_t, struct init_exec *); }; -struct bit_entry { - uint8_t id[2]; - uint16_t length; - uint16_t offset; -}; - static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *); #define MACRO_INDEX_SIZE 2 @@ -1231,7 +1222,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) return 3; } - if (cond & 1) + if (!(cond & 1)) iexec->execute = false; } break; @@ -4675,6 +4666,92 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i return 0; } +struct pll_mapping { + u8 type; + u32 reg; +}; + +static struct pll_mapping nv04_pll_mapping[] = { + { PLL_CORE , NV_PRAMDAC_NVPLL_COEFF }, + { PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF }, + { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF }, + { PLL_VPLL1 , NV_RAMDAC_VPLL2 }, + {} +}; + +static struct pll_mapping nv40_pll_mapping[] = { + { PLL_CORE , 0x004000 }, + { PLL_MEMORY, 0x004020 }, + { PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF }, + { PLL_VPLL1 , NV_RAMDAC_VPLL2 }, + {} +}; + +static struct pll_mapping nv50_pll_mapping[] = { + { PLL_CORE , 0x004028 }, + { PLL_SHADER, 0x004020 }, + { PLL_UNK03 , 0x004000 }, + { PLL_MEMORY, 0x004008 }, + { PLL_UNK40 , 0x00e810 }, + { PLL_UNK41 , 0x00e818 }, + { PLL_UNK42 , 0x00e824 }, + { PLL_VPLL0 , 0x614100 }, + { PLL_VPLL1 , 0x614900 }, + {} +}; + +static struct pll_mapping nv84_pll_mapping[] = { + { PLL_CORE , 0x004028 }, + { PLL_SHADER, 0x004020 }, + { PLL_MEMORY, 0x004008 }, + { PLL_UNK05 , 0x004030 }, + { PLL_UNK41 , 0x00e818 }, + { PLL_VPLL0 , 0x614100 }, + { PLL_VPLL1 , 0x614900 }, + {} +}; + +u32 +get_pll_register(struct drm_device *dev, enum pll_types type) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; + struct pll_mapping *map; + int i; + + if (dev_priv->card_type < NV_40) + map = nv04_pll_mapping; + else + if (dev_priv->card_type < NV_50) + map = nv40_pll_mapping; + else { + u8 *plim = &bios->data[bios->pll_limit_tbl_ptr]; + + if (plim[0] >= 0x30) { + u8 *entry = plim + plim[1]; + for (i = 0; i < plim[3]; i++, entry += plim[2]) { + if (entry[0] == type) + return ROM32(entry[3]); + } + + return 0; + } + + if (dev_priv->chipset == 0x50) + map = nv50_pll_mapping; + else + map = nv84_pll_mapping; + } + + while (map->reg) { + if (map->type == type) + return map->reg; + map++; + } + + return 0; +} + int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim) { /* @@ -4750,6 +4827,17 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims /* initialize all members to zero */ memset(pll_lim, 0, sizeof(struct pll_lims)); + /* if we were passed a type rather than a register, figure + * out the register and store it + */ + if (limit_match > PLL_MAX) + pll_lim->reg = limit_match; + else { + pll_lim->reg = get_pll_register(dev, limit_match); + if (!pll_lim->reg) + return -ENOENT; + } + if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) { uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex]; @@ -4785,7 +4873,6 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims pll_lim->max_usable_log2p = 0x6; } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) { uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen; - 
uint32_t reg = 0; /* default match */ uint8_t *pll_rec; int i; @@ -4797,37 +4884,22 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims NV_WARN(dev, "Default PLL limit entry has non-zero " "register field\n"); - if (limit_match > MAX_PLL_TYPES) - /* we've been passed a reg as the match */ - reg = limit_match; - else /* limit match is a pll type */ - for (i = 1; i < entries && !reg; i++) { - uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]); - - if (limit_match == NVPLL && - (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000)) - reg = cmpreg; - if (limit_match == MPLL && - (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020)) - reg = cmpreg; - if (limit_match == VPLL1 && - (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010)) - reg = cmpreg; - if (limit_match == VPLL2 && - (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018)) - reg = cmpreg; - } - for (i = 1; i < entries; i++) - if (ROM32(bios->data[plloffs + recordlen * i]) == reg) { + if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) { pllindex = i; break; } + if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) { + NV_ERROR(dev, "Register 0x%08x not found in PLL " + "limits table", pll_lim->reg); + return -ENOENT; + } + pll_rec = &bios->data[plloffs + recordlen * pllindex]; BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n", - pllindex ? reg : 0); + pllindex ? pll_lim->reg : 0); /* * Frequencies are stored in tables in MHz, kHz are more @@ -4877,8 +4949,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims if (cv == 0x51 && !pll_lim->refclk) { uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK); - if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) || - ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) { + if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) || + (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) { if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3) pll_lim->refclk = 200000; else @@ -4891,10 +4963,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims int i; BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n", - limit_match); + pll_lim->reg); for (i = 0; i < entries; i++, entry += recordlen) { - if (ROM32(entry[3]) == limit_match) { + if (ROM32(entry[3]) == pll_lim->reg) { record = &bios->data[ROM16(entry[1])]; break; } @@ -4902,7 +4974,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims if (!record) { NV_ERROR(dev, "Register 0x%08x not found in PLL " - "limits table", limit_match); + "limits table", pll_lim->reg); return -ENOENT; } @@ -4931,10 +5003,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims int i; BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n", - limit_match); + pll_lim->reg); for (i = 0; i < entries; i++, entry += recordlen) { - if (ROM32(entry[3]) == limit_match) { + if (ROM32(entry[3]) == pll_lim->reg) { record = &bios->data[ROM16(entry[1])]; break; } @@ -4942,7 +5014,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims if (!record) { NV_ERROR(dev, "Register 0x%08x not found in PLL " - "limits table", limit_match); + "limits table", pll_lim->reg); return -ENOENT; } @@ -5293,7 +5365,7 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios, if (bitentry->length < 0x5) return 0; - if (bitentry->id[1] < 2) { + if (bitentry->version < 2) { 
bios->ram_restrict_group_count = bios->data[bitentry->offset + 2]; bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]); } else { @@ -5403,27 +5475,40 @@ struct bit_table { #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) +int +bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; + u8 entries, *entry; + + entries = bios->data[bios->offset + 10]; + entry = &bios->data[bios->offset + 12]; + while (entries--) { + if (entry[0] == id) { + bit->id = entry[0]; + bit->version = entry[1]; + bit->length = ROM16(entry[2]); + bit->offset = ROM16(entry[4]); + bit->data = ROMPTR(bios, entry[4]); + return 0; + } + + entry += bios->data[bios->offset + 9]; + } + + return -ENOENT; +} + static int parse_bit_table(struct nvbios *bios, const uint16_t bitoffset, struct bit_table *table) { struct drm_device *dev = bios->dev; - uint8_t maxentries = bios->data[bitoffset + 4]; - int i, offset; struct bit_entry bitentry; - for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) { - bitentry.id[0] = bios->data[offset]; - - if (bitentry.id[0] != table->id) - continue; - - bitentry.id[1] = bios->data[offset + 1]; - bitentry.length = ROM16(bios->data[offset + 2]); - bitentry.offset = ROM16(bios->data[offset + 4]); - + if (bit_table(dev, table->id, &bitentry) == 0) return table->parse_fn(dev, bios, &bitentry); - } NV_INFO(dev, "BIT table '%c' not found\n", table->id); return -ENOSYS; @@ -5683,8 +5768,14 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len) static struct dcb_gpio_entry * new_gpio_entry(struct nvbios *bios) { + struct drm_device *dev = bios->dev; struct dcb_gpio_table *gpio = &bios->dcb.gpio; + if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) { + NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n"); + return NULL; + } + return &gpio->entry[gpio->entries++]; } @@ -5705,114 +5796,91 @@ nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag) return NULL; } -static void -parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset) -{ - struct dcb_gpio_entry *gpio; - uint16_t ent = ROM16(bios->data[offset]); - uint8_t line = ent & 0x1f, - tag = ent >> 5 & 0x3f, - flags = ent >> 11 & 0x1f; - - if (tag == 0x3f) - return; - - gpio = new_gpio_entry(bios); - - gpio->tag = tag; - gpio->line = line; - gpio->invert = flags != 4; - gpio->entry = ent; -} - -static void -parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) -{ - uint32_t entry = ROM32(bios->data[offset]); - struct dcb_gpio_entry *gpio; - - if ((entry & 0x0000ff00) == 0x0000ff00) - return; - - gpio = new_gpio_entry(bios); - gpio->tag = (entry & 0x0000ff00) >> 8; - gpio->line = (entry & 0x0000001f) >> 0; - gpio->state_default = (entry & 0x01000000) >> 24; - gpio->state[0] = (entry & 0x18000000) >> 27; - gpio->state[1] = (entry & 0x60000000) >> 29; - gpio->entry = entry; -} - static void parse_dcb_gpio_table(struct nvbios *bios) { struct drm_device *dev = bios->dev; - uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr; - uint8_t *gpio_table = &bios->data[gpio_table_ptr]; - int header_len = gpio_table[1], - entries = gpio_table[2], - entry_len = gpio_table[3]; - void (*parse_entry)(struct nvbios *, uint16_t) = NULL; + struct dcb_gpio_entry *e; + u8 headerlen, entries, recordlen; + u8 *dcb, *gpio = NULL, *entry; int i; - if (bios->dcb.version >= 0x40) { - if (gpio_table_ptr && entry_len != 4) { - NV_WARN(dev, "Invalid DCB GPIO 
table entry length.\n"); - return; - } + dcb = ROMPTR(bios, bios->data[0x36]); + if (dcb[0] >= 0x30) { + gpio = ROMPTR(bios, dcb[10]); + if (!gpio) + goto no_table; - parse_entry = parse_dcb40_gpio_entry; + headerlen = gpio[1]; + entries = gpio[2]; + recordlen = gpio[3]; + } else + if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) { + gpio = ROMPTR(bios, dcb[-15]); + if (!gpio) + goto no_table; - } else if (bios->dcb.version >= 0x30) { - if (gpio_table_ptr && entry_len != 2) { - NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); - return; - } - - parse_entry = parse_dcb30_gpio_entry; - - } else if (bios->dcb.version >= 0x22) { - /* - * DCBs older than v3.0 don't really have a GPIO - * table, instead they keep some GPIO info at fixed - * locations. - */ - uint16_t dcbptr = ROM16(bios->data[0x36]); - uint8_t *tvdac_gpio = &bios->data[dcbptr - 5]; + headerlen = 3; + entries = gpio[2]; + recordlen = gpio[1]; + } else + if (dcb[0] >= 0x22) { + /* No GPIO table present, parse the TVDAC GPIO data. */ + uint8_t *tvdac_gpio = &dcb[-5]; if (tvdac_gpio[0] & 1) { - struct dcb_gpio_entry *gpio = new_gpio_entry(bios); - - gpio->tag = DCB_GPIO_TVDAC0; - gpio->line = tvdac_gpio[1] >> 4; - gpio->invert = tvdac_gpio[0] & 2; + e = new_gpio_entry(bios); + e->tag = DCB_GPIO_TVDAC0; + e->line = tvdac_gpio[1] >> 4; + e->invert = tvdac_gpio[0] & 2; } + + goto no_table; } else { - /* - * No systematic way to store GPIO info on pre-v2.2 - * DCBs, try to match the PCI device IDs. - */ + NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]); + goto no_table; + } - /* Apple iMac G4 NV18 */ - if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) { - struct dcb_gpio_entry *gpio = new_gpio_entry(bios); + entry = gpio + headerlen; + for (i = 0; i < entries; i++, entry += recordlen) { + e = new_gpio_entry(bios); + if (!e) + break; - gpio->tag = DCB_GPIO_TVDAC0; - gpio->line = 4; + if (gpio[0] < 0x40) { + e->entry = ROM16(entry[0]); + e->tag = (e->entry & 0x07e0) >> 5; + if (e->tag == 0x3f) { + bios->dcb.gpio.entries--; + continue; + } + + e->line = (e->entry & 0x001f); + e->invert = ((e->entry & 0xf800) >> 11) != 4; + } else { + e->entry = ROM32(entry[0]); + e->tag = (e->entry & 0x0000ff00) >> 8; + if (e->tag == 0xff) { + bios->dcb.gpio.entries--; + continue; + } + + e->line = (e->entry & 0x0000001f) >> 0; + e->state_default = (e->entry & 0x01000000) >> 24; + e->state[0] = (e->entry & 0x18000000) >> 27; + e->state[1] = (e->entry & 0x60000000) >> 29; } - } - if (!gpio_table_ptr) - return; - - if (entries > DCB_MAX_NUM_GPIO_ENTRIES) { - NV_WARN(dev, "Too many entries in the DCB GPIO table.\n"); - entries = DCB_MAX_NUM_GPIO_ENTRIES; +no_table: + /* Apple iMac G4 NV18 */ + if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) { + e = new_gpio_entry(bios); + if (e) { + e->tag = DCB_GPIO_TVDAC0; + e->line = 4; + } } - - for (i = 0; i < entries; i++) - parse_entry(bios, gpio_table_ptr + header_len + entry_len * i); } struct dcb_connector_table_entry * @@ -6680,6 +6748,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev) bit_signature, sizeof(bit_signature)); if (offset) { NV_TRACE(dev, "BIT BIOS found\n"); + bios->type = NVBIOS_BIT; + bios->offset = offset; return parse_bit_structure(bios, offset + 6); } @@ -6687,6 +6757,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev) bmp_signature, sizeof(bmp_signature)); if (offset) { NV_TRACE(dev, "BMP BIOS found\n"); + bios->type = NVBIOS_BMP; + bios->offset = offset; return parse_bmp_structure(dev, bios, offset); } @@ -6806,6 +6878,8 @@ nouveau_bios_init(struct 
drm_device *dev) "running VBIOS init tables.\n"); bios->execute = true; } + if (nouveau_force_post) + bios->execute = true; ret = nouveau_run_vbios_init(dev); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index c1de2f3fcb0e..50a648e01c49 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -34,6 +34,20 @@ #define DCB_LOC_ON_CHIP 0 +#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x)) +#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x)) +#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL) + +struct bit_entry { + uint8_t id; + uint8_t version; + uint16_t length; + uint16_t offset; + uint8_t *data; +}; + +int bit_table(struct drm_device *, u8 id, struct bit_entry *); + struct dcb_i2c_entry { uint32_t entry; uint8_t port_type; @@ -170,16 +184,28 @@ enum LVDS_script { LVDS_PANEL_OFF }; -/* changing these requires matching changes to reg tables in nv_get_clock */ -#define MAX_PLL_TYPES 4 +/* these match types in pll limits table version 0x40, + * nouveau uses them on all chipsets internally where a + * specific pll needs to be referenced, but the exact + * register isn't known. + */ enum pll_types { - NVPLL, - MPLL, - VPLL1, - VPLL2 + PLL_CORE = 0x01, + PLL_SHADER = 0x02, + PLL_UNK03 = 0x03, + PLL_MEMORY = 0x04, + PLL_UNK05 = 0x05, + PLL_UNK40 = 0x40, + PLL_UNK41 = 0x41, + PLL_UNK42 = 0x42, + PLL_VPLL0 = 0x80, + PLL_VPLL1 = 0x81, + PLL_MAX = 0xff }; struct pll_lims { + u32 reg; + struct { int minfreq; int maxfreq; @@ -212,6 +238,11 @@ struct pll_lims { struct nvbios { struct drm_device *dev; + enum { + NVBIOS_BMP, + NVBIOS_BIT + } type; + uint16_t offset; uint8_t chip_version; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index f6f44779d82f..80353e2b8409 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -36,21 +36,6 @@ #include #include -int -nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan) -{ - struct nouveau_fence *prev_fence = nvbo->bo.sync_obj; - int ret; - - if (!prev_fence || nouveau_fence_channel(prev_fence) == chan) - return 0; - - spin_lock(&nvbo->bo.lock); - ret = ttm_bo_wait(&nvbo->bo, false, false, false); - spin_unlock(&nvbo->bo.lock); - return ret; -} - static void nouveau_bo_del_ttm(struct ttm_buffer_object *bo) { @@ -58,8 +43,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) struct drm_device *dev = dev_priv->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); - ttm_bo_kunmap(&nvbo->kmap); - if (unlikely(nvbo->gem)) DRM_ERROR("bo %p still attached to GEM object\n", bo); @@ -164,8 +147,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size); align >>= PAGE_SHIFT; - nvbo->placement.fpfn = 0; - nvbo->placement.lpfn = mappable ? 
dev_priv->fb_mappable_pages : 0; nouveau_bo_placement_set(nvbo, flags, 0); nvbo->channel = chan; @@ -305,7 +286,8 @@ nouveau_bo_map(struct nouveau_bo *nvbo) void nouveau_bo_unmap(struct nouveau_bo *nvbo) { - ttm_bo_kunmap(&nvbo->kmap); + if (nvbo) + ttm_bo_kunmap(&nvbo->kmap); } u16 @@ -399,14 +381,19 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: + man->func = &ttm_bo_manager_func; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - man->gpu_offset = dev_priv->vm_vram_base; + if (dev_priv->card_type == NV_50) + man->gpu_offset = 0x40000000; + else + man->gpu_offset = 0; break; case TTM_PL_TT: + man->func = &ttm_bo_manager_func; switch (dev_priv->gart_info.type) { case NOUVEAU_GART_AGP: man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; @@ -469,19 +456,26 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, if (ret) return ret; - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, - evict || (nvbo->channel && - nvbo->channel != chan), + if (nvbo->channel) { + ret = nouveau_fence_sync(fence, nvbo->channel); + if (ret) + goto out; + } + + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); +out: nouveau_fence_unref((void *)&fence); return ret; } static inline uint32_t -nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, - struct ttm_mem_reg *mem) +nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, + struct nouveau_channel *chan, struct ttm_mem_reg *mem) { - if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) { + struct nouveau_bo *nvbo = nouveau_bo(bo); + + if (nvbo->no_vm) { if (mem->mem_type == TTM_PL_TT) return NvDmaGART; return NvDmaVRAM; @@ -493,86 +487,181 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan, } static int -nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) +nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { - struct nouveau_bo *nvbo = nouveau_bo(bo); struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct ttm_mem_reg *old_mem = &bo->mem; - struct nouveau_channel *chan; - uint64_t src_offset, dst_offset; - uint32_t page_count; + struct nouveau_bo *nvbo = nouveau_bo(bo); + u64 length = (new_mem->num_pages << PAGE_SHIFT); + u64 src_offset, dst_offset; int ret; - chan = nvbo->channel; - if (!chan || nvbo->tile_flags || nvbo->no_vm) - chan = dev_priv->channel; - - src_offset = old_mem->mm_node->start << PAGE_SHIFT; - dst_offset = new_mem->mm_node->start << PAGE_SHIFT; - if (chan != dev_priv->channel) { - if (old_mem->mem_type == TTM_PL_TT) - src_offset += dev_priv->vm_gart_base; - else + src_offset = old_mem->start << PAGE_SHIFT; + dst_offset = new_mem->start << PAGE_SHIFT; + if (!nvbo->no_vm) { + if (old_mem->mem_type == TTM_PL_VRAM) src_offset += dev_priv->vm_vram_base; - - if (new_mem->mem_type == TTM_PL_TT) - dst_offset += dev_priv->vm_gart_base; else + src_offset += dev_priv->vm_gart_base; + + if (new_mem->mem_type == TTM_PL_VRAM) dst_offset += dev_priv->vm_vram_base; + else + dst_offset += dev_priv->vm_gart_base; } ret = RING_SPACE(chan, 3); if (ret) return ret; - BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); - OUT_RING(chan, 
nouveau_bo_mem_ctxdma(nvbo, chan, old_mem)); - OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem)); - if (dev_priv->card_type >= NV_50) { - ret = RING_SPACE(chan, 4); + BEGIN_RING(chan, NvSubM2MF, 0x0184, 2); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); + + while (length) { + u32 amount, stride, height; + + amount = min(length, (u64)(4 * 1024 * 1024)); + stride = 16 * 4; + height = amount / stride; + + if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, 0x0200, 7); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + } else { + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); + OUT_RING (chan, 1); + } + if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, 0x021c, 7); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + } else { + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, 0x021c, 1); + OUT_RING (chan, 1); + } + + ret = RING_SPACE(chan, 14); if (ret) return ret; - BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); - OUT_RING(chan, 1); - BEGIN_RING(chan, NvSubM2MF, 0x021c, 1); - OUT_RING(chan, 1); + + BEGIN_RING(chan, NvSubM2MF, 0x0238, 2); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, upper_32_bits(dst_offset)); + BEGIN_RING(chan, NvSubM2MF, 0x030c, 8); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + OUT_RING (chan, stride); + OUT_RING (chan, stride); + OUT_RING (chan, stride); + OUT_RING (chan, height); + OUT_RING (chan, 0x00000101); + OUT_RING (chan, 0x00000000); + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + OUT_RING (chan, 0); + + length -= amount; + src_offset += amount; + dst_offset += amount; } + return 0; +} + +static int +nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + u32 src_offset = old_mem->start << PAGE_SHIFT; + u32 dst_offset = new_mem->start << PAGE_SHIFT; + u32 page_count = new_mem->num_pages; + int ret; + + ret = RING_SPACE(chan, 3); + if (ret) + return ret; + + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); + page_count = new_mem->num_pages; while (page_count) { int line_count = (page_count > 2047) ? 
2047 : page_count; - if (dev_priv->card_type >= NV_50) { - ret = RING_SPACE(chan, 3); - if (ret) - return ret; - BEGIN_RING(chan, NvSubM2MF, 0x0238, 2); - OUT_RING(chan, upper_32_bits(src_offset)); - OUT_RING(chan, upper_32_bits(dst_offset)); - } ret = RING_SPACE(chan, 11); if (ret) return ret; + BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); - OUT_RING(chan, lower_32_bits(src_offset)); - OUT_RING(chan, lower_32_bits(dst_offset)); - OUT_RING(chan, PAGE_SIZE); /* src_pitch */ - OUT_RING(chan, PAGE_SIZE); /* dst_pitch */ - OUT_RING(chan, PAGE_SIZE); /* line_length */ - OUT_RING(chan, line_count); - OUT_RING(chan, (1<<8)|(1<<0)); - OUT_RING(chan, 0); + OUT_RING (chan, src_offset); + OUT_RING (chan, dst_offset); + OUT_RING (chan, PAGE_SIZE); /* src_pitch */ + OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ + OUT_RING (chan, PAGE_SIZE); /* line_length */ + OUT_RING (chan, line_count); + OUT_RING (chan, 0x00000101); + OUT_RING (chan, 0x00000000); BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); - OUT_RING(chan, 0); + OUT_RING (chan, 0); page_count -= line_count; src_offset += (PAGE_SIZE * line_count); dst_offset += (PAGE_SIZE * line_count); } + return 0; +} + +static int +nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, + bool no_wait_reserve, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_channel *chan; + int ret; + + chan = nvbo->channel; + if (!chan || nvbo->no_vm) + chan = dev_priv->channel; + + if (dev_priv->card_type < NV_50) + ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); + else + ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); + if (ret) + return ret; + return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); } @@ -606,12 +695,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); out: - if (tmp_mem.mm_node) { - spin_lock(&bo->bdev->glob->lru_lock); - drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&bo->bdev->glob->lru_lock); - } - + ttm_bo_mem_put(bo, &tmp_mem); return ret; } @@ -644,12 +728,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, goto out; out: - if (tmp_mem.mm_node) { - spin_lock(&bo->bdev->glob->lru_lock); - drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&bo->bdev->glob->lru_lock); - } - + ttm_bo_mem_put(bo, &tmp_mem); return ret; } @@ -669,7 +748,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, return 0; } - offset = new_mem->mm_node->start << PAGE_SHIFT; + offset = new_mem->start << PAGE_SHIFT; if (dev_priv->card_type == NV_50) { ret = nv50_mem_vm_bind_linear(dev, @@ -719,12 +798,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - /* Software copy if the card isn't up and running yet. */ - if (!dev_priv->channel) { - ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); - goto out; - } - /* Fake bo copy. */ if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { BUG_ON(bo->mem.mm_node != NULL); @@ -733,6 +806,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, goto out; } + /* Software copy if the card isn't up and running yet. */ + if (!dev_priv->channel) { + ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + goto out; + } + /* Hardware assisted copy. 
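 * [editor's note, not part of the patch: with the reordering above, a
 *  buffer that needs no actual data copy (SYSTEM placement, no ttm) is
 *  now handled before the !dev_priv->channel memcpy fallback, so the
 *  software path is only reached by moves that would really need the
 *  blitter.]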
*/ if (new_mem->mem_type == TTM_PL_SYSTEM) ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); @@ -783,14 +862,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) case TTM_PL_TT: #if __OS_HAS_AGP if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = dev_priv->gart_info.aper_base; mem->bus.is_iomem = true; } #endif break; case TTM_PL_VRAM: - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.is_iomem = true; break; @@ -808,7 +887,26 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) static int nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) { - return 0; + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + + /* as long as the bo isn't in vram, and isn't tiled, we've got + * nothing to do here. + */ + if (bo->mem.mem_type != TTM_PL_VRAM) { + if (dev_priv->card_type < NV_50 || !nvbo->tile_flags) + return 0; + } + + /* make sure bo is in mappable vram */ + if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages) + return 0; + + + nvbo->placement.fpfn = 0; + nvbo->placement.lpfn = dev_priv->fb_mappable_pages; + nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); + return ttm_bo_validate(bo, &nvbo->placement, false, true, false); } struct ttm_bo_driver nouveau_bo_driver = { diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c index ca85da784846..dad96cce5e39 100644 --- a/drivers/gpu/drm/nouveau/nouveau_calc.c +++ b/drivers/gpu/drm/nouveau/nouveau_calc.c @@ -198,8 +198,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv_fifo_info fifo_data; struct nv_sim_state sim_data; - int MClk = nouveau_hw_get_clock(dev, MPLL); - int NVClk = nouveau_hw_get_clock(dev, NVPLL); + int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); + int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1); sim_data.pclk_khz = VClk; @@ -234,7 +234,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, } static void -nv30_update_arb(int *burst, int *lwm) +nv20_update_arb(int *burst, int *lwm) { unsigned int fifo_size, burst_size, graphics_lwm; @@ -251,14 +251,14 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm { struct drm_nouveau_private *dev_priv = dev->dev_private; - if (dev_priv->card_type < NV_30) + if (dev_priv->card_type < NV_20) nv04_update_arb(dev, vclk, bpp, burst, lwm); else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { *burst = 128; *lwm = 0x0480; } else - nv30_update_arb(burst, lwm); + nv20_update_arb(burst, lwm); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 0480f064f2c1..373950e34814 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) dev_priv->gart_info.aper_size, NV_DMA_ACCESS_RO, &pushbuf, NULL); - chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; + chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else if (dev_priv->card_type != NV_04) 
{ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, &pushbuf); - chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; + chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's * exact reason for existing :) PCI access to cmdbuf in @@ -67,17 +67,11 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) dev_priv->fb_available_size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI, &pushbuf); - chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; - } - - ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); - if (ret) { - NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret); - if (pushbuf != dev_priv->gart_info.sg_ctxdma) - nouveau_gpuobj_del(dev, &pushbuf); - return ret; + chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } + nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); + nouveau_gpuobj_ref(NULL, &pushbuf); return 0; } @@ -229,7 +223,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ret = nouveau_dma_init(chan); if (!ret) - ret = nouveau_fence_init(chan); + ret = nouveau_fence_channel_init(chan); if (ret) { nouveau_channel_free(chan); return ret; @@ -276,7 +270,7 @@ nouveau_channel_free(struct nouveau_channel *chan) * above attempts at idling were OK, but if we failed this'll tell TTM * we're done with the buffers. */ - nouveau_fence_fini(chan); + nouveau_fence_channel_fini(chan); /* This will prevent pfifo from switching channels. */ pfifo->reassign(dev, false); @@ -308,8 +302,9 @@ nouveau_channel_free(struct nouveau_channel *chan) spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); /* Release the channel's resources */ - nouveau_gpuobj_ref_del(dev, &chan->pushbuf); + nouveau_gpuobj_ref(NULL, &chan->pushbuf); if (chan->pushbuf_bo) { + nouveau_bo_unmap(chan->pushbuf_bo); nouveau_bo_unpin(chan->pushbuf_bo); nouveau_bo_ref(NULL, &chan->pushbuf_bo); } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index fc737037f751..0871495096fa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -76,6 +76,22 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder) return NULL; } +/*TODO: This could use improvement, and learn to handle the fixed + * BIOS tables etc. It's fine currently, for its only user. 
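 * [editor's note, not part of the patch: for EDID 1.4 (revision >= 4)
 *  panels, bits 6:4 of the EDID 'input' byte encode the colour depth;
 *  the expression below decodes that field as 4 + 2 * code bits per
 *  component, with anything older falling back to a hardcoded 18.]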
+ */ +int +nouveau_connector_bpp(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + + if (nv_connector->edid && nv_connector->edid->revision >= 4) { + u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4; + if (bpc > 4) + return bpc; + } + + return 18; +} static void nouveau_connector_destroy(struct drm_connector *drm_connector) @@ -130,6 +146,36 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, return NULL; } +static struct nouveau_encoder * +nouveau_connector_of_detect(struct drm_connector *connector) +{ +#ifdef __powerpc__ + struct drm_device *dev = connector->dev; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder; + struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev); + + if (!dn || + !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) || + (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG)))) + return NULL; + + for_each_child_of_node(dn, cn) { + const char *name = of_get_property(cn, "name", NULL); + const void *edid = of_get_property(cn, "EDID", NULL); + int idx = name ? name[strlen(name) - 1] - 'A' : 0; + + if (nv_encoder->dcb->i2c_index == idx && edid) { + nv_connector->edid = + kmemdup(edid, EDID_LENGTH, GFP_KERNEL); + of_node_put(cn); + return nv_encoder; + } + } +#endif + return NULL; +} + static void nouveau_connector_set_encoder(struct drm_connector *connector, struct nouveau_encoder *nv_encoder) @@ -225,6 +271,12 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) return connector_status_connected; } + nv_encoder = nouveau_connector_of_detect(connector); + if (nv_encoder) { + nouveau_connector_set_encoder(connector, nv_encoder); + return connector_status_connected; + } + detect_analog: nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); if (!nv_encoder && !nouveau_tv_disable) @@ -630,7 +682,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, else max_clock = nv_encoder->dp.link_nr * 162000; - clock *= 3; + clock = clock * nouveau_connector_bpp(connector) / 8; break; default: BUG_ON(1); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index 0d2e668ccfe5..c21ed6b16f88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -55,4 +55,7 @@ nouveau_connector_create(struct drm_device *, int index); void nouveau_connector_set_polling(struct drm_connector *); +int +nouveau_connector_bpp(struct drm_connector *); + #endif /* __NOUVEAU_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 7933de4aff2e..8e1592368cce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -157,7 +157,23 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data) return 0; } +static int +nouveau_debugfs_evict_vram(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private; + int ret; + + ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); + if (ret) + seq_printf(m, "failed: %d", ret); + else + seq_printf(m, "succeeded\n"); + return 0; +} + static struct drm_info_list nouveau_debugfs_list[] = { + { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL }, { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, { "memory", nouveau_debugfs_memory_info, 0, NULL }, { 
"vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 2e3c6caa97ee..82581e600dcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -28,6 +28,7 @@ #include "drm.h" #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_ramht.h" void nouveau_dma_pre_init(struct nouveau_channel *chan) @@ -58,26 +59,17 @@ nouveau_dma_init(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *m2mf = NULL; - struct nouveau_gpuobj *nvsw = NULL; + struct nouveau_gpuobj *obj = NULL; int ret, i; /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? - 0x0039 : 0x5039, &m2mf); + 0x0039 : 0x5039, &obj); if (ret) return ret; - ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL); - if (ret) - return ret; - - /* Create an NV_SW object for various sync purposes */ - ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw); - if (ret) - return ret; - - ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL); + ret = nouveau_ramht_insert(chan, NvM2MF, obj); + nouveau_gpuobj_ref(NULL, &obj); if (ret) return ret; @@ -91,11 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan) if (ret) return ret; - /* Map M2MF notifier object - fbcon. */ - ret = nouveau_bo_map(chan->notifier_bo); - if (ret) - return ret; - /* Insert NOPS for NOUVEAU_DMA_SKIPS */ ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); if (ret) @@ -113,13 +100,6 @@ nouveau_dma_init(struct nouveau_channel *chan) BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1); OUT_RING(chan, NvNotify0); - /* Initialise NV_SW */ - ret = RING_SPACE(chan, 2); - if (ret) - return ret; - BEGIN_RING(chan, NvSubSw, 0, 1); - OUT_RING(chan, NvSw); - /* Sit back and pray the channel works.. 
*/ FIRE_RING(chan); @@ -217,7 +197,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count) chan->dma.ib_free = get - chan->dma.ib_put; if (chan->dma.ib_free <= 0) - chan->dma.ib_free += chan->dma.ib_max + 1; + chan->dma.ib_free += chan->dma.ib_max; } return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index 8b05c15866d5..d578c21d3c8d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -72,6 +72,7 @@ enum { NvGdiRect = 0x8000000c, NvImageBlit = 0x8000000d, NvSw = 0x8000000e, + NvSema = 0x8000000f, /* G80+ display objects */ NvEvoVRAM = 0x01000000, diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 8a1b188b4cd1..4562f309ae3d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -317,7 +317,8 @@ train: return false; config[0] = nv_encoder->dp.link_nr; - if (nv_encoder->dp.dpcd_version >= 0x11) + if (nv_encoder->dp.dpcd_version >= 0x11 && + nv_encoder->dp.enhanced_frame) config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; ret = nouveau_dp_lane_count_set(encoder, config[0]); @@ -468,10 +469,12 @@ nouveau_dp_detect(struct drm_encoder *encoder) !nv_encoder->dcb->dpconf.link_bw) nv_encoder->dp.link_bw = DP_LINK_BW_1_62; - nv_encoder->dp.link_nr = dpcd[2] & 0xf; + nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK; if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr) nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr; + nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP); + return true; } @@ -524,7 +527,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); - if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) { + if (!nv_wait(dev, NV50_AUXCH_CTRL(index), + 0x00010000, 0x00000000)) { NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n", nv_rd32(dev, NV50_AUXCH_CTRL(index))); ret = -EBUSY; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index eb15345162a0..90875494a65a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -31,13 +31,14 @@ #include "nouveau_hw.h" #include "nouveau_fb.h" #include "nouveau_fbcon.h" +#include "nouveau_pm.h" #include "nv50_display.h" #include "drm_pciids.h" -MODULE_PARM_DESC(noagp, "Disable AGP"); -int nouveau_noagp; -module_param_named(noagp, nouveau_noagp, int, 0400); +MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)"); +int nouveau_agpmode = -1; +module_param_named(agpmode, nouveau_agpmode, int, 0400); MODULE_PARM_DESC(modeset, "Enable kernel modesetting"); static int nouveau_modeset = -1; /* kms */ @@ -79,6 +80,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); int nouveau_nofbaccel = 0; module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); +MODULE_PARM_DESC(force_post, "Force POST"); +int nouveau_force_post = 0; +module_param_named(force_post, nouveau_force_post, int, 0400); + MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type"); int nouveau_override_conntype = 0; module_param_named(override_conntype, nouveau_override_conntype, int, 0400); @@ -102,6 +107,14 @@ MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n" int nouveau_reg_debug; module_param_named(reg_debug, nouveau_reg_debug, int, 0600); +MODULE_PARM_DESC(perflvl, "Performance 
level (default: boot)\n"); +char *nouveau_perflvl; +module_param_named(perflvl, nouveau_perflvl, charp, 0400); + +MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n"); +int nouveau_perflvl_wr; +module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); + int nouveau_fbpercrtc; #if 0 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); @@ -271,6 +284,8 @@ nouveau_pci_resume(struct pci_dev *pdev) if (ret) return ret; + nouveau_pm_resume(dev); + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { ret = nouveau_mem_init_agp(dev); if (ret) { @@ -379,8 +394,6 @@ static struct drm_driver driver = { .irq_uninstall = nouveau_irq_uninstall, .irq_handler = nouveau_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = nouveau_ioctls, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b1be617373b6..3a07e580d27a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -133,22 +133,24 @@ enum nouveau_flags { #define NVOBJ_ENGINE_DISPLAY 2 #define NVOBJ_ENGINE_INT 0xdeadbeef -#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0) #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) -#define NVOBJ_FLAG_FAKE (1 << 3) struct nouveau_gpuobj { + struct drm_device *dev; + struct kref refcount; struct list_head list; - struct nouveau_channel *im_channel; struct drm_mm_node *im_pramin; struct nouveau_bo *im_backing; - uint32_t im_backing_start; uint32_t *im_backing_suspend; int im_bound; uint32_t flags; - int refcount; + + u32 size; + u32 pinst; + u32 cinst; + u64 vinst; uint32_t engine; uint32_t class; @@ -157,16 +159,6 @@ struct nouveau_gpuobj { void *priv; }; -struct nouveau_gpuobj_ref { - struct list_head list; - - struct nouveau_gpuobj *gpuobj; - uint32_t instance; - - struct nouveau_channel *channel; - int handle; -}; - struct nouveau_channel { struct drm_device *dev; int id; @@ -192,33 +184,32 @@ struct nouveau_channel { } fence; /* DMA push buffer */ - struct nouveau_gpuobj_ref *pushbuf; - struct nouveau_bo *pushbuf_bo; - uint32_t pushbuf_base; + struct nouveau_gpuobj *pushbuf; + struct nouveau_bo *pushbuf_bo; + uint32_t pushbuf_base; /* Notifier memory */ struct nouveau_bo *notifier_bo; struct drm_mm notifier_heap; /* PFIFO context */ - struct nouveau_gpuobj_ref *ramfc; - struct nouveau_gpuobj_ref *cache; + struct nouveau_gpuobj *ramfc; + struct nouveau_gpuobj *cache; /* PGRAPH context */ /* XXX may be merge 2 pointers as private data ??? 
*/ - struct nouveau_gpuobj_ref *ramin_grctx; + struct nouveau_gpuobj *ramin_grctx; void *pgraph_ctx; /* NV50 VM */ - struct nouveau_gpuobj *vm_pd; - struct nouveau_gpuobj_ref *vm_gart_pt; - struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR]; + struct nouveau_gpuobj *vm_pd; + struct nouveau_gpuobj *vm_gart_pt; + struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; /* Objects */ - struct nouveau_gpuobj_ref *ramin; /* Private instmem */ - struct drm_mm ramin_heap; /* Private PRAMIN heap */ - struct nouveau_gpuobj_ref *ramht; /* Hash table */ - struct list_head ramht_refs; /* Objects referenced by RAMHT */ + struct nouveau_gpuobj *ramin; /* Private instmem */ + struct drm_mm ramin_heap; /* Private PRAMIN heap */ + struct nouveau_ramht *ramht; /* Hash table */ /* GPU object info for stuff used in-kernel (mm_enabled) */ uint32_t m2mf_ntfy; @@ -296,7 +287,7 @@ struct nouveau_fb_engine { struct nouveau_fifo_engine { int channels; - struct nouveau_gpuobj_ref *playlist[2]; + struct nouveau_gpuobj *playlist[2]; int cur_playlist; int (*init)(struct drm_device *); @@ -305,7 +296,6 @@ struct nouveau_fifo_engine { void (*disable)(struct drm_device *); void (*enable)(struct drm_device *); bool (*reassign)(struct drm_device *, bool enable); - bool (*cache_flush)(struct drm_device *dev); bool (*cache_pull)(struct drm_device *dev, bool enable); int (*channel_id)(struct drm_device *); @@ -334,7 +324,7 @@ struct nouveau_pgraph_engine { int grctx_size; /* NV2x/NV3x context table (0x400780) */ - struct nouveau_gpuobj_ref *ctx_table; + struct nouveau_gpuobj *ctx_table; int (*init)(struct drm_device *); void (*takedown)(struct drm_device *); @@ -369,6 +359,91 @@ struct nouveau_gpio_engine { void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); }; +struct nouveau_pm_voltage_level { + u8 voltage; + u8 vid; +}; + +struct nouveau_pm_voltage { + bool supported; + u8 vid_mask; + + struct nouveau_pm_voltage_level *level; + int nr_level; +}; + +#define NOUVEAU_PM_MAX_LEVEL 8 +struct nouveau_pm_level { + struct device_attribute dev_attr; + char name[32]; + int id; + + u32 core; + u32 memory; + u32 shader; + u32 unk05; + + u8 voltage; + u8 fanspeed; + + u16 memscript; +}; + +struct nouveau_pm_temp_sensor_constants { + u16 offset_constant; + s16 offset_mult; + u16 offset_div; + u16 slope_mult; + u16 slope_div; +}; + +struct nouveau_pm_threshold_temp { + s16 critical; + s16 down_clock; + s16 fan_boost; +}; + +struct nouveau_pm_memtiming { + u32 reg_100220; + u32 reg_100224; + u32 reg_100228; + u32 reg_10022c; + u32 reg_100230; + u32 reg_100234; + u32 reg_100238; + u32 reg_10023c; +}; + +struct nouveau_pm_memtimings { + bool supported; + struct nouveau_pm_memtiming *timing; + int nr_timing; +}; + +struct nouveau_pm_engine { + struct nouveau_pm_voltage voltage; + struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL]; + int nr_perflvl; + struct nouveau_pm_memtimings memtimings; + struct nouveau_pm_temp_sensor_constants sensor_constants; + struct nouveau_pm_threshold_temp threshold_temp; + + struct nouveau_pm_level boot; + struct nouveau_pm_level *cur; + + struct device *hwmon; + + int (*clock_get)(struct drm_device *, u32 id); + void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, + u32 id, int khz); + void (*clock_set)(struct drm_device *, void *); + int (*voltage_get)(struct drm_device *); + int (*voltage_set)(struct drm_device *, int voltage); + int (*fanspeed_get)(struct drm_device *); + int (*fanspeed_set)(struct drm_device *, int fanspeed); + int (*temp_get)(struct drm_device *); +}; + 
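/*
 * [editor's note, not part of the patch] A minimal sketch of how the
 * nouveau_pm_engine hook table above might be consumed once a chipset's
 * setup code has filled it in.  It assumes the usual nouveau_drv.h
 * context; the helper name and the dev_priv->engine.pm path are
 * illustrative assumptions, and every hook is treated as optional.
 */
static int nouveau_pm_sketch_report(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = nouveau_private(dev);
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	int core, volt;

	if (!pm->clock_get)
		return -ENODEV;

	/* clock_get() takes one of the pll_types ids, e.g. PLL_CORE. */
	core = pm->clock_get(dev, PLL_CORE);
	if (core < 0)
		return core;

	/* voltage_get() is only wired up on boards with a voltage table. */
	volt = pm->voltage_get ? pm->voltage_get(dev) : -ENODEV;

	NV_INFO(dev, "core clock %d, voltage %d\n", core, volt);
	return 0;
}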
struct nouveau_engine { struct nouveau_instmem_engine instmem; struct nouveau_mc_engine mc; @@ -378,6 +453,7 @@ struct nouveau_engine { struct nouveau_fifo_engine fifo; struct nouveau_display_engine display; struct nouveau_gpio_engine gpio; + struct nouveau_pm_engine pm; }; struct nouveau_pll_vals { @@ -522,8 +598,14 @@ struct drm_nouveau_private { int flags; void __iomem *mmio; + + spinlock_t ramin_lock; void __iomem *ramin; - uint32_t ramin_size; + u32 ramin_size; + u32 ramin_base; + bool ramin_available; + struct drm_mm ramin_heap; + struct list_head gpuobj_list; struct nouveau_bo *vga_ram; @@ -540,6 +622,12 @@ struct drm_nouveau_private { atomic_t validate_sequence; } ttm; + struct { + spinlock_t lock; + struct drm_mm heap; + struct nouveau_bo *bo; + } fence; + int fifo_alloc_count; struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; @@ -550,15 +638,11 @@ struct drm_nouveau_private { spinlock_t context_switch_lock; /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ - struct nouveau_gpuobj *ramht; + struct nouveau_ramht *ramht; + struct nouveau_gpuobj *ramfc; + struct nouveau_gpuobj *ramro; + uint32_t ramin_rsvd_vram; - uint32_t ramht_offset; - uint32_t ramht_size; - uint32_t ramht_bits; - uint32_t ramfc_offset; - uint32_t ramfc_size; - uint32_t ramro_offset; - uint32_t ramro_size; struct { enum { @@ -576,14 +660,12 @@ struct drm_nouveau_private { } gart_info; /* nv10-nv40 tiling regions */ - struct { - struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; - spinlock_t lock; - } tile; + struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR]; /* VRAM/fb configuration */ uint64_t vram_size; uint64_t vram_sys_base; + u32 vram_rblock_size; uint64_t fb_phys; uint64_t fb_available_size; @@ -600,10 +682,6 @@ struct drm_nouveau_private { struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; int vm_vram_pt_nr; - struct drm_mm ramin_heap; - - struct list_head gpuobj_list; - struct nvbios vbios; struct nv04_mode_state mode_reg; @@ -633,6 +711,12 @@ struct drm_nouveau_private { struct apertures_struct *apertures; }; +static inline struct drm_nouveau_private * +nouveau_private(struct drm_device *dev) +{ + return dev->dev_private; +} + static inline struct drm_nouveau_private * nouveau_bdev(struct ttm_bo_device *bd) { @@ -669,7 +753,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) } while (0) /* nouveau_drv.c */ -extern int nouveau_noagp; +extern int nouveau_agpmode; extern int nouveau_duallink; extern int nouveau_uscript_lvds; extern int nouveau_uscript_tmds; @@ -683,7 +767,10 @@ extern char *nouveau_vbios; extern int nouveau_ignorelid; extern int nouveau_nofbaccel; extern int nouveau_noaccel; +extern int nouveau_force_post; extern int nouveau_override_conntype; +extern char *nouveau_perflvl; +extern int nouveau_perflvl_wr; extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); extern int nouveau_pci_resume(struct pci_dev *pdev); @@ -704,8 +791,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *); extern int nouveau_card_init(struct drm_device *); /* nouveau_mem.c */ -extern int nouveau_mem_detect(struct drm_device *dev); -extern int nouveau_mem_init(struct drm_device *); +extern int nouveau_mem_vram_init(struct drm_device *); +extern void nouveau_mem_vram_fini(struct drm_device *); +extern int nouveau_mem_gart_init(struct drm_device *); +extern void nouveau_mem_gart_fini(struct drm_device *); extern int nouveau_mem_init_agp(struct drm_device *); extern int nouveau_mem_reset_agp(struct drm_device *); extern void nouveau_mem_close(struct drm_device 
*); @@ -749,7 +838,6 @@ extern void nouveau_channel_free(struct nouveau_channel *); extern int nouveau_gpuobj_early_init(struct drm_device *); extern int nouveau_gpuobj_init(struct drm_device *); extern void nouveau_gpuobj_takedown(struct drm_device *); -extern void nouveau_gpuobj_late_takedown(struct drm_device *); extern int nouveau_gpuobj_suspend(struct drm_device *dev); extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); extern void nouveau_gpuobj_resume(struct drm_device *dev); @@ -759,24 +847,11 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *, uint32_t size, int align, uint32_t flags, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *, - uint32_t handle, struct nouveau_gpuobj *, - struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_ref_del(struct drm_device *, - struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle, - struct nouveau_gpuobj_ref **ref_ret); -extern int nouveau_gpuobj_new_ref(struct drm_device *, - struct nouveau_channel *alloc_chan, - struct nouveau_channel *ref_chan, - uint32_t handle, uint32_t size, int align, - uint32_t flags, struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_new_fake(struct drm_device *, - uint32_t p_offset, uint32_t b_offset, - uint32_t size, uint32_t flags, - struct nouveau_gpuobj **, - struct nouveau_gpuobj_ref**); +extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *, + struct nouveau_gpuobj **); +extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst, + u32 size, u32 flags, + struct nouveau_gpuobj **); extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, uint64_t offset, uint64_t size, int access, int target, struct nouveau_gpuobj **); @@ -879,6 +954,7 @@ extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *, enum dcb_gpio_tag); extern struct dcb_connector_table_entry * nouveau_bios_connector_entry(struct drm_device *, int index); +extern u32 get_pll_register(struct drm_device *, enum pll_types); extern int get_pll_limits(struct drm_device *, uint32_t limit_match, struct pll_lims *); extern int nouveau_bios_run_display_table(struct drm_device *, @@ -925,10 +1001,10 @@ extern int nv40_fb_init(struct drm_device *); extern void nv40_fb_takedown(struct drm_device *); extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, uint32_t, uint32_t); - /* nv50_fb.c */ extern int nv50_fb_init(struct drm_device *); extern void nv50_fb_takedown(struct drm_device *); +extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *); /* nvc0_fb.c */ extern int nvc0_fb_init(struct drm_device *); @@ -939,7 +1015,6 @@ extern int nv04_fifo_init(struct drm_device *); extern void nv04_fifo_disable(struct drm_device *); extern void nv04_fifo_enable(struct drm_device *); extern bool nv04_fifo_reassign(struct drm_device *, bool); -extern bool nv04_fifo_cache_flush(struct drm_device *); extern bool nv04_fifo_cache_pull(struct drm_device *, bool); extern int nv04_fifo_channel_id(struct drm_device *); extern int nv04_fifo_create_context(struct nouveau_channel *); @@ -977,7 +1052,6 @@ extern void nvc0_fifo_takedown(struct drm_device *); extern void nvc0_fifo_disable(struct drm_device *); extern void nvc0_fifo_enable(struct drm_device *); extern bool 
nvc0_fifo_reassign(struct drm_device *, bool); -extern bool nvc0_fifo_cache_flush(struct drm_device *); extern bool nvc0_fifo_cache_pull(struct drm_device *, bool); extern int nvc0_fifo_channel_id(struct drm_device *); extern int nvc0_fifo_create_context(struct nouveau_channel *); @@ -1169,15 +1243,21 @@ extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); /* nouveau_fence.c */ struct nouveau_fence; -extern int nouveau_fence_init(struct nouveau_channel *); -extern void nouveau_fence_fini(struct nouveau_channel *); +extern int nouveau_fence_init(struct drm_device *); +extern void nouveau_fence_fini(struct drm_device *); +extern int nouveau_fence_channel_init(struct nouveau_channel *); +extern void nouveau_fence_channel_fini(struct nouveau_channel *); extern void nouveau_fence_update(struct nouveau_channel *); extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **, bool emit); extern int nouveau_fence_emit(struct nouveau_fence *); +extern void nouveau_fence_work(struct nouveau_fence *fence, + void (*work)(void *priv, bool signalled), + void *priv); struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); extern bool nouveau_fence_signalled(void *obj, void *arg); extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); +extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); extern int nouveau_fence_flush(void *obj, void *arg); extern void nouveau_fence_unref(void **obj); extern void *nouveau_fence_ref(void *obj); @@ -1255,12 +1335,11 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val) iowrite32_native(val, dev_priv->mmio + reg); } -static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val) +static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val) { u32 tmp = nv_rd32(dev, reg); - tmp &= ~mask; - tmp |= val; - nv_wr32(dev, reg, tmp); + nv_wr32(dev, reg, (tmp & ~mask) | val); + return tmp; } static inline u8 nv_rd08(struct drm_device *dev, unsigned reg) @@ -1275,7 +1354,7 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val) iowrite8(val, dev_priv->mmio + reg); } -#define nv_wait(reg, mask, val) \ +#define nv_wait(dev, reg, mask, val) \ nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) /* PRAMIN access */ @@ -1292,17 +1371,8 @@ static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val) } /* object access */ -static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj, - unsigned index) -{ - return nv_ri32(dev, obj->im_pramin->start + index * 4); -} - -static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj, - unsigned index, u32 val) -{ - nv_wi32(dev, obj->im_pramin->start + index * 4, val); -} +extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset); +extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val); /* * Logging @@ -1403,6 +1473,7 @@ nv_match_device(struct drm_device *dev, unsigned device, #define NV_SW_SEMAPHORE_OFFSET 0x00000064 #define NV_SW_SEMAPHORE_ACQUIRE 0x00000068 #define NV_SW_SEMAPHORE_RELEASE 0x0000006c +#define NV_SW_YIELD 0x00000080 #define NV_SW_DMA_VBLSEM 0x0000018c #define NV_SW_VBLSEM_OFFSET 0x00000400 #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index 7c82d68bc155..ae69b61d93db 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -55,6 +55,7 @@ 
struct nouveau_encoder { int dpcd_version; int link_nr; int link_bw; + bool enhanced_frame; } dp; }; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index dbd30b2e43fd..02a4d1fd4845 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -104,6 +104,8 @@ static struct fb_ops nouveau_fbcon_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; static struct fb_ops nv04_fbcon_ops = { @@ -117,6 +119,8 @@ static struct fb_ops nv04_fbcon_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; static struct fb_ops nv50_fbcon_ops = { @@ -130,6 +134,8 @@ static struct fb_ops nv50_fbcon_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 87ac21ec23d2..441b12420bb1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -28,9 +28,11 @@ #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" #include "nouveau_dma.h" -#define USE_REFCNT (dev_priv->card_type >= NV_10) +#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10) +#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17) struct nouveau_fence { struct nouveau_channel *channel; @@ -39,6 +41,15 @@ struct nouveau_fence { uint32_t sequence; bool signalled; + + void (*work)(void *priv, bool signalled); + void *priv; +}; + +struct nouveau_semaphore { + struct kref ref; + struct drm_device *dev; + struct drm_mm_node *mem; }; static inline struct nouveau_fence * @@ -59,14 +70,13 @@ nouveau_fence_del(struct kref *ref) void nouveau_fence_update(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; - struct list_head *entry, *tmp; - struct nouveau_fence *fence; + struct drm_device *dev = chan->dev; + struct nouveau_fence *tmp, *fence; uint32_t sequence; spin_lock(&chan->fence.lock); - if (USE_REFCNT) + if (USE_REFCNT(dev)) sequence = nvchan_rd32(chan, 0x48); else sequence = atomic_read(&chan->fence.last_sequence_irq); @@ -75,12 +85,14 @@ nouveau_fence_update(struct nouveau_channel *chan) goto out; chan->fence.sequence_ack = sequence; - list_for_each_safe(entry, tmp, &chan->fence.pending) { - fence = list_entry(entry, struct nouveau_fence, entry); - + list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { sequence = fence->sequence; fence->signalled = true; list_del(&fence->entry); + + if (unlikely(fence->work)) + fence->work(fence->priv, true); + kref_put(&fence->refcount, nouveau_fence_del); if (sequence == chan->fence.sequence_ack) @@ -121,8 +133,8 @@ nouveau_fence_channel(struct nouveau_fence *fence) int nouveau_fence_emit(struct nouveau_fence *fence) { - struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private; struct nouveau_channel *chan = fence->channel; + struct drm_device *dev = chan->dev; int ret; ret = RING_SPACE(chan, 2); @@ -143,13 +155,32 @@ 
nouveau_fence_emit(struct nouveau_fence *fence) list_add_tail(&fence->entry, &chan->fence.pending); spin_unlock(&chan->fence.lock); - BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1); + BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1); OUT_RING(chan, fence->sequence); FIRE_RING(chan); return 0; } +void +nouveau_fence_work(struct nouveau_fence *fence, + void (*work)(void *priv, bool signalled), + void *priv) +{ + BUG_ON(fence->work); + + spin_lock(&fence->channel->fence.lock); + + if (fence->signalled) { + work(priv, true); + } else { + fence->work = work; + fence->priv = priv; + } + + spin_unlock(&fence->channel->fence.lock); +} + void nouveau_fence_unref(void **sync_obj) { @@ -213,6 +244,162 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) return ret; } +static struct nouveau_semaphore * +alloc_semaphore(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_semaphore *sema; + + if (!USE_SEMA(dev)) + return NULL; + + sema = kmalloc(sizeof(*sema), GFP_KERNEL); + if (!sema) + goto fail; + + spin_lock(&dev_priv->fence.lock); + sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); + if (sema->mem) + sema->mem = drm_mm_get_block(sema->mem, 4, 0); + spin_unlock(&dev_priv->fence.lock); + + if (!sema->mem) + goto fail; + + kref_init(&sema->ref); + sema->dev = dev; + nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0); + + return sema; +fail: + kfree(sema); + return NULL; +} + +static void +free_semaphore(struct kref *ref) +{ + struct nouveau_semaphore *sema = + container_of(ref, struct nouveau_semaphore, ref); + struct drm_nouveau_private *dev_priv = sema->dev->dev_private; + + spin_lock(&dev_priv->fence.lock); + drm_mm_put_block(sema->mem); + spin_unlock(&dev_priv->fence.lock); + + kfree(sema); +} + +static void +semaphore_work(void *priv, bool signalled) +{ + struct nouveau_semaphore *sema = priv; + struct drm_nouveau_private *dev_priv = sema->dev->dev_private; + + if (unlikely(!signalled)) + nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1); + + kref_put(&sema->ref, free_semaphore); +} + +static int +emit_semaphore(struct nouveau_channel *chan, int method, + struct nouveau_semaphore *sema) +{ + struct drm_nouveau_private *dev_priv = sema->dev->dev_private; + struct nouveau_fence *fence; + bool smart = (dev_priv->card_type >= NV_50); + int ret; + + ret = RING_SPACE(chan, smart ? 8 : 4); + if (ret) + return ret; + + if (smart) { + BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1); + OUT_RING(chan, NvSema); + } + BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1); + OUT_RING(chan, sema->mem->start); + + if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) { + /* + * NV50 tries to be too smart and context-switch + * between semaphores instead of doing a "first come, + * first served" strategy like previous cards + * do. + * + * That's bad because the ACQUIRE latency can get as + * large as the PFIFO context time slice in the + * typical DRI2 case where you have several + * outstanding semaphores at the same moment. + * + * If we're going to ACQUIRE, force the card to + * context switch before, just in case the matching + * RELEASE is already scheduled to be executed in + * another channel. 
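 * [editor's note, not part of the patch: the net ordering is YIELD
 *  followed by SEMAPHORE_ACQUIRE on the waiting channel, and
 *  SEMAPHORE_RELEASE followed by YIELD on the signalling channel (see
 *  the RELEASE branch just below), so each side gives up its time
 *  slice as soon as it safely can.]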
+ */ + BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1); + OUT_RING(chan, 0); + } + + BEGIN_RING(chan, NvSubSw, method, 1); + OUT_RING(chan, 1); + + if (smart && method == NV_SW_SEMAPHORE_RELEASE) { + /* + * Force the card to context switch, there may be + * another channel waiting for the semaphore we just + * released. + */ + BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1); + OUT_RING(chan, 0); + } + + /* Delay semaphore destruction until its work is done */ + ret = nouveau_fence_new(chan, &fence, true); + if (ret) + return ret; + + kref_get(&sema->ref); + nouveau_fence_work(fence, semaphore_work, sema); + nouveau_fence_unref((void *)&fence); + + return 0; +} + +int +nouveau_fence_sync(struct nouveau_fence *fence, + struct nouveau_channel *wchan) +{ + struct nouveau_channel *chan = nouveau_fence_channel(fence); + struct drm_device *dev = wchan->dev; + struct nouveau_semaphore *sema; + int ret; + + if (likely(!fence || chan == wchan || + nouveau_fence_signalled(fence, NULL))) + return 0; + + sema = alloc_semaphore(dev); + if (!sema) { + /* Early card or broken userspace, fall back to + * software sync. */ + return nouveau_fence_wait(fence, NULL, false, false); + } + + /* Make wchan wait until it gets signalled */ + ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); + if (ret) + goto out; + + /* Signal the semaphore from chan */ + ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); +out: + kref_put(&sema->ref, free_semaphore); + return ret; +} + int nouveau_fence_flush(void *sync_obj, void *sync_arg) { @@ -220,26 +407,123 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg) } int -nouveau_fence_init(struct nouveau_channel *chan) +nouveau_fence_channel_init(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *obj = NULL; + int ret; + + /* Create an NV_SW object for various sync purposes */ + ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); + if (ret) + return ret; + + ret = nouveau_ramht_insert(chan, NvSw, obj); + nouveau_gpuobj_ref(NULL, &obj); + if (ret) + return ret; + + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + BEGIN_RING(chan, NvSubSw, 0, 1); + OUT_RING(chan, NvSw); + + /* Create a DMA object for the shared cross-channel sync area. 
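 * [editor's note, not part of the patch: the object created below is a
 *  NV_CLASS_DMA_IN_MEMORY window over dev_priv->fence.bo, the shared
 *  VRAM buffer allocated in nouveau_fence_init(); each channel inserts
 *  it into its RAMHT under the NvSema handle, so semaphores handed out
 *  from the shared heap refer to the same memory in every channel.]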
*/ + if (USE_SEMA(dev)) { + struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node; + + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + mem->start << PAGE_SHIFT, + mem->size << PAGE_SHIFT, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_VIDMEM, &obj); + if (ret) + return ret; + + ret = nouveau_ramht_insert(chan, NvSema, obj); + nouveau_gpuobj_ref(NULL, &obj); + if (ret) + return ret; + + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1); + OUT_RING(chan, NvSema); + } + + FIRE_RING(chan); + INIT_LIST_HEAD(&chan->fence.pending); spin_lock_init(&chan->fence.lock); atomic_set(&chan->fence.last_sequence_irq, 0); + return 0; } void -nouveau_fence_fini(struct nouveau_channel *chan) +nouveau_fence_channel_fini(struct nouveau_channel *chan) { - struct list_head *entry, *tmp; - struct nouveau_fence *fence; - - list_for_each_safe(entry, tmp, &chan->fence.pending) { - fence = list_entry(entry, struct nouveau_fence, entry); + struct nouveau_fence *tmp, *fence; + list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { fence->signalled = true; list_del(&fence->entry); + + if (unlikely(fence->work)) + fence->work(fence->priv, false); + kref_put(&fence->refcount, nouveau_fence_del); } } +int +nouveau_fence_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + /* Create a shared VRAM heap for cross-channel sync. */ + if (USE_SEMA(dev)) { + ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, + 0, 0, false, true, &dev_priv->fence.bo); + if (ret) + return ret; + + ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM); + if (ret) + goto fail; + + ret = nouveau_bo_map(dev_priv->fence.bo); + if (ret) + goto fail; + + ret = drm_mm_init(&dev_priv->fence.heap, 0, + dev_priv->fence.bo->bo.mem.size); + if (ret) + goto fail; + + spin_lock_init(&dev_priv->fence.lock); + } + + return 0; +fail: + nouveau_bo_unmap(dev_priv->fence.bo); + nouveau_bo_ref(NULL, &dev_priv->fence.bo); + return ret; +} + +void +nouveau_fence_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (USE_SEMA(dev)) { + drm_mm_takedown(&dev_priv->fence.heap); + nouveau_bo_unmap(dev_priv->fence.bo); + nouveau_bo_unpin(dev_priv->fence.bo); + nouveau_bo_ref(NULL, &dev_priv->fence.bo); + } +} diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 19620a6709f5..5c4c929d7f74 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -362,7 +362,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, list_for_each_entry(nvbo, list, entry) { struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; - ret = nouveau_bo_sync_gpu(nvbo, chan); + ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); if (unlikely(ret)) { NV_ERROR(dev, "fail pre-validate sync\n"); return ret; @@ -385,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, return ret; } - ret = nouveau_bo_sync_gpu(nvbo, chan); + ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); if (unlikely(ret)) { NV_ERROR(dev, "fail post-validate sync\n"); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h index 5d39c4ce8006..4a8ad1307fa4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.h +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h @@ -126,7 +126,7 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val) reg = (reg - 0x00400000) / 4; reg = (reg - 
ctx->ctxprog_reg) + ctx->ctxvals_base; - nv_wo32(ctx->dev, ctx->data, reg, val); + nv_wo32(ctx->data, reg * 4, val); } #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index 7b613682e400..bed669a54a2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -305,7 +305,7 @@ setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg, bool mpll = Preg == 0x4020; uint32_t oldPval = nvReadMC(dev, Preg); uint32_t NMNM = pv->NM2 << 16 | pv->NM1; - uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) | + uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) | 0xc << 28 | pv->log2P << 16; uint32_t saved4600 = 0; /* some cards have different maskc040s */ @@ -427,22 +427,12 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype, struct nouveau_pll_vals *pllvals) { struct drm_nouveau_private *dev_priv = dev->dev_private; - const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF, - NV_PRAMDAC_MPLL_COEFF, - NV_PRAMDAC_VPLL_COEFF, - NV_RAMDAC_VPLL2 }; - const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000, - 0x4020, - NV_PRAMDAC_VPLL_COEFF, - NV_RAMDAC_VPLL2 }; - uint32_t reg1, pll1, pll2 = 0; + uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0; struct pll_lims pll_lim; int ret; - if (dev_priv->card_type < NV_40) - reg1 = nv04_regs[plltype]; - else - reg1 = nv40_regs[plltype]; + if (reg1 == 0) + return -ENOENT; pll1 = nvReadMC(dev, reg1); @@ -491,8 +481,10 @@ int nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) { struct nouveau_pll_vals pllvals; + int ret; - if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { + if (plltype == PLL_MEMORY && + (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) { uint32_t mpllP; pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); @@ -501,14 +493,17 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) return 400000 / mpllP; } else - if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { + if (plltype == PLL_MEMORY && + (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) { uint32_t clock; pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); return clock; } - nouveau_hw_get_pllvals(dev, plltype, &pllvals); + ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); + if (ret) + return ret; return nouveau_hw_pllvals_to_clk(&pllvals); } @@ -526,9 +521,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) struct nouveau_pll_vals pv; uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; - if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim)) + if (get_pll_limits(dev, pllreg, &pll_lim)) return; - nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv); + nouveau_hw_get_pllvals(dev, pllreg, &pv); if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && @@ -661,7 +656,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head, if (dev_priv->card_type >= NV_10) regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); - nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, ®p->pllvals); + nouveau_hw_get_pllvals(dev, head ? 
PLL_VPLL1 : PLL_VPLL0, ®p->pllvals); state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); if (nv_two_heads(dev)) state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); @@ -866,10 +861,11 @@ nv_save_state_ext(struct drm_device *dev, int head, rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_21); - if (dev_priv->card_type >= NV_30) { + if (dev_priv->card_type >= NV_20) rd_cio_state(dev, head, regp, NV_CIO_CRE_47); + + if (dev_priv->card_type >= NV_30) rd_cio_state(dev, head, regp, 0x9f); - } rd_cio_state(dev, head, regp, NV_CIO_CRE_49); rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); @@ -976,10 +972,11 @@ nv_load_state_ext(struct drm_device *dev, int head, wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); - if (dev_priv->card_type >= NV_30) { + if (dev_priv->card_type >= NV_20) wr_cio_state(dev, head, regp, NV_CIO_CRE_47); + + if (dev_priv->card_type >= NV_30) wr_cio_state(dev, head, regp, 0x9f); - } wr_cio_state(dev, head, regp, NV_CIO_CRE_49); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 84614858728b..fdd7e3de79c8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -299,7 +299,10 @@ nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr) int nouveau_i2c_identify(struct drm_device *dev, const char *what, - struct i2c_board_info *info, int index) + struct i2c_board_info *info, + bool (*match)(struct nouveau_i2c_chan *, + struct i2c_board_info *), + int index) { struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index); int i; @@ -307,7 +310,8 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what, NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); for (i = 0; info[i].addr; i++) { - if (nouveau_probe_i2c_addr(i2c, info[i].addr)) { + if (nouveau_probe_i2c_addr(i2c, info[i].addr) && + (!match || match(i2c, &info[i]))) { NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); return i; } diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h index cfe7c8426d1d..422b62fd8272 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.h +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h @@ -43,7 +43,10 @@ void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *); struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index); bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr); int nouveau_i2c_identify(struct drm_device *dev, const char *what, - struct i2c_board_info *info, int index); + struct i2c_board_info *info, + bool (*match)(struct nouveau_i2c_chan *, + struct i2c_board_info *), + int index); extern const struct i2c_algorithm nouveau_dp_i2c_algo; diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 794b0ee30cf6..6fd51a51c608 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -35,6 +35,7 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" #include "nouveau_reg.h" +#include "nouveau_ramht.h" #include /* needed for hotplug irq */ @@ -106,15 +107,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data) const int mthd = addr & 0x1ffc; if (mthd == 0x0000) { - struct nouveau_gpuobj_ref *ref = NULL; + struct nouveau_gpuobj *gpuobj; - if (nouveau_gpuobj_ref_find(chan, data, &ref)) + gpuobj = 
nouveau_ramht_find(chan, data); + if (!gpuobj) return false; - if (ref->gpuobj->engine != NVOBJ_ENGINE_SW) + if (gpuobj->engine != NVOBJ_ENGINE_SW) return false; - chan->sw_subchannel[subc] = ref->gpuobj->class; + chan->sw_subchannel[subc] = gpuobj->class; nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); return true; @@ -200,16 +202,45 @@ nouveau_fifo_irq_handler(struct drm_device *dev) } if (status & NV_PFIFO_INTR_DMA_PUSHER) { - NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid); + u32 get = nv_rd32(dev, 0x003244); + u32 put = nv_rd32(dev, 0x003240); + u32 push = nv_rd32(dev, 0x003220); + u32 state = nv_rd32(dev, 0x003228); + if (dev_priv->card_type == NV_50) { + u32 ho_get = nv_rd32(dev, 0x003328); + u32 ho_put = nv_rd32(dev, 0x003320); + u32 ib_get = nv_rd32(dev, 0x003334); + u32 ib_put = nv_rd32(dev, 0x003330); + + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " + "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " + "State 0x%08x Push 0x%08x\n", + chid, ho_get, get, ho_put, put, ib_get, ib_put, + state, push); + + /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ + nv_wr32(dev, 0x003364, 0x00000000); + if (get != put || ho_get != ho_put) { + nv_wr32(dev, 0x003244, put); + nv_wr32(dev, 0x003328, ho_put); + } else + if (ib_get != ib_put) { + nv_wr32(dev, 0x003334, ib_put); + } + } else { + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " + "Put 0x%08x State 0x%08x Push 0x%08x\n", + chid, get, put, state, push); + + if (get != put) + nv_wr32(dev, 0x003244, put); + } + + nv_wr32(dev, 0x003228, 0x00000000); + nv_wr32(dev, 0x003220, 0x00000001); + nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); status &= ~NV_PFIFO_INTR_DMA_PUSHER; - nv_wr32(dev, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_DMA_PUSHER); - - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000); - if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get) - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, - get + 4); } if (status & NV_PFIFO_INTR_SEMAPHORE) { @@ -226,6 +257,14 @@ nouveau_fifo_irq_handler(struct drm_device *dev) nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); } + if (dev_priv->card_type == NV_50) { + if (status & 0x00000010) { + nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); + status &= ~0x00000010; + nv_wr32(dev, 0x002100, 0x00000010); + } + } + if (status) { NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", status, chid); @@ -357,7 +396,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev) if (!chan || !chan->ramin_grctx) continue; - if (inst == chan->ramin_grctx->instance) + if (inst == chan->ramin_grctx->pinst) break; } } else { @@ -369,7 +408,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev) if (!chan || !chan->ramin) continue; - if (inst == chan->ramin->instance) + if (inst == chan->ramin->vinst) break; } } @@ -605,40 +644,6 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); } -static void -nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t trap[6]; - int i, ch; - uint32_t idx = nv_rd32(dev, 0x100c90); - if (idx & 0x80000000) { - idx &= 0xffffff; - if (display) { - for (i = 0; i < 6; i++) { - nv_wr32(dev, 0x100c90, idx | i << 24); - trap[i] = nv_rd32(dev, 0x100c94); - } - for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { - struct nouveau_channel *chan = dev_priv->fifos[ch]; - - if (!chan || !chan->ramin) - continue; - - if (trap[1] == chan->ramin->instance >> 12) - break; - } - NV_INFO(dev, "%s - VM: Trapped %s 
at %02x%04x%04x status %08x %08x channel %d\n", - name, (trap[5]&0x100?"read":"write"), - trap[5]&0xff, trap[4]&0xffff, - trap[3]&0xffff, trap[0], trap[2], ch); - } - nv_wr32(dev, 0x100c90, idx | 0x80000000); - } else if (display) { - NV_INFO(dev, "%s - no VM fault?\n", name); - } -} - static struct nouveau_enum_names nv50_mp_exec_error_names[] = { { 3, "STACK_UNDERFLOW" }, @@ -711,7 +716,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, tps++; switch (type) { case 6: /* texture error... unknown for now */ - nv50_pfb_vm_trap(dev, display, name); + nv50_fb_vm_trap(dev, display, name); if (display) { NV_ERROR(dev, "magic set %d:\n", i); for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) @@ -734,7 +739,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); - nv50_pfb_vm_trap(dev, display, name); + nv50_fb_vm_trap(dev, display, name); /* 2d engine destination */ if (ustatus & 0x00000010) { if (display) { @@ -817,7 +822,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) /* Known to be triggered by screwed up NOTIFY and COND... */ if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); nv_wr32(dev, 0x400500, 0); if (nv_rd32(dev, 0x400808) & 0x80000000) { if (display) { @@ -842,7 +847,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) ustatus &= ~0x00000001; } if (ustatus & 0x00000002) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); nv_wr32(dev, 0x400500, 0); if (nv_rd32(dev, 0x40084c) & 0x80000000) { if (display) { @@ -884,15 +889,15 @@ nv50_pgraph_trap_handler(struct drm_device *dev) NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); } if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); ustatus &= ~0x00000001; } if (ustatus & 0x00000002) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); ustatus &= ~0x00000002; } if (ustatus & 0x00000004) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); ustatus &= ~0x00000004; } NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", @@ -917,7 +922,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); } if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08), @@ -939,7 +944,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); } if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808), @@ -964,7 +969,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); } if (ustatus & 0x00000001) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); + nv50_fb_vm_trap(dev, display, 
"PGRAPH_TRAP_CCACHE_FAULT"); NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804), @@ -986,7 +991,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) * remaining, so try to handle it anyway. Perhaps related to that * unknown DMA slot on tesla? */ if (status & 0x20) { - nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); + nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; if (display) NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 9689d4147686..a163c7c612e7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -35,6 +35,8 @@ #include "drm_sarea.h" #include "nouveau_drv.h" +#define MIN(a,b) a < b ? a : b + /* * NV10-NV40 tiling helpers */ @@ -47,18 +49,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + struct nouveau_tile_reg *tile = &dev_priv->tile[i]; tile->addr = addr; tile->size = size; tile->used = !!pitch; nouveau_fence_unref((void **)&tile->fence); - if (!pfifo->cache_flush(dev)) - return; - pfifo->reassign(dev, false); - pfifo->cache_flush(dev); pfifo->cache_pull(dev, false); nouveau_wait_for_idle(dev); @@ -76,34 +74,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; - struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL; - int i; + struct nouveau_tile_reg *found = NULL; + unsigned long i, flags; - spin_lock(&dev_priv->tile.lock); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); for (i = 0; i < pfb->num_tiles; i++) { - if (tile[i].used) + struct nouveau_tile_reg *tile = &dev_priv->tile[i]; + + if (tile->used) /* Tile region in use. */ continue; - if (tile[i].fence && - !nouveau_fence_signalled(tile[i].fence, NULL)) + if (tile->fence && + !nouveau_fence_signalled(tile->fence, NULL)) /* Pending tile region. */ continue; - if (max(tile[i].addr, addr) < - min(tile[i].addr + tile[i].size, addr + size)) + if (max(tile->addr, addr) < + min(tile->addr + tile->size, addr + size)) /* Kill an intersecting tile region. */ nv10_mem_set_region_tiling(dev, i, 0, 0, 0); if (pitch && !found) { /* Free tile region. 
*/ nv10_mem_set_region_tiling(dev, i, addr, size, pitch); - found = &tile[i]; + found = tile; } } - spin_unlock(&dev_priv->tile.lock); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return found; } @@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, virt += (end - pte); while (pte < end) { - nv_wo32(dev, pgt, pte++, offset_l); - nv_wo32(dev, pgt, pte++, offset_h); + nv_wo32(pgt, (pte * 4) + 0, offset_l); + nv_wo32(pgt, (pte * 4) + 4, offset_h); + pte += 2; } } } @@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) pages -= (end - pte); virt += (end - pte) << 15; - while (pte < end) - nv_wo32(dev, pgt, pte++, 0); + while (pte < end) { + nv_wo32(pgt, (pte * 4), 0); + pte++; + } } dev_priv->engine.instmem.flush(dev); @@ -218,7 +221,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) * Cleanup everything */ void -nouveau_mem_close(struct drm_device *dev) +nouveau_mem_vram_fini(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -229,6 +232,19 @@ nouveau_mem_close(struct drm_device *dev) nouveau_ttm_global_release(dev_priv); + if (dev_priv->fb_mtrr >= 0) { + drm_mtrr_del(dev_priv->fb_mtrr, + pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1), DRM_MTRR_WC); + dev_priv->fb_mtrr = -1; + } +} + +void +nouveau_mem_gart_fini(struct drm_device *dev) +{ + nouveau_sgdma_takedown(dev); + if (drm_core_has_AGP(dev) && dev->agp) { struct drm_agp_mem *entry, *tempe; @@ -248,13 +264,6 @@ nouveau_mem_close(struct drm_device *dev) dev->agp->acquired = 0; dev->agp->enabled = 0; } - - if (dev_priv->fb_mtrr) { - drm_mtrr_del(dev_priv->fb_mtrr, - pci_resource_start(dev->pdev, 1), - pci_resource_len(dev->pdev, 1), DRM_MTRR_WC); - dev_priv->fb_mtrr = -1; - } } static uint32_t @@ -305,8 +314,62 @@ nouveau_mem_detect_nforce(struct drm_device *dev) return 0; } -/* returns the amount of FB ram in bytes */ -int +static void +nv50_vram_preinit(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i, parts, colbits, rowbitsa, rowbitsb, banks; + u64 rowsize, predicted; + u32 r0, r4, rt, ru; + + r0 = nv_rd32(dev, 0x100200); + r4 = nv_rd32(dev, 0x100204); + rt = nv_rd32(dev, 0x100250); + ru = nv_rd32(dev, 0x001540); + NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); + + for (i = 0, parts = 0; i < 8; i++) { + if (ru & (0x00010000 << i)) + parts++; + } + + colbits = (r4 & 0x0000f000) >> 12; + rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; + rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; + banks = ((r4 & 0x01000000) ? 8 : 4); + + rowsize = parts * banks * (1 << colbits) * 8; + predicted = rowsize << rowbitsa; + if (r0 & 0x00000004) + predicted += rowsize << rowbitsb; + + if (predicted != dev_priv->vram_size) { + NV_WARN(dev, "memory controller reports %dMiB VRAM\n", + (u32)(dev_priv->vram_size >> 20)); + NV_WARN(dev, "we calculated %dMiB VRAM\n", + (u32)(predicted >> 20)); + } + + dev_priv->vram_rblock_size = rowsize >> 12; + if (rt & 1) + dev_priv->vram_rblock_size *= 3; + + NV_DEBUG(dev, "rblock %lld bytes\n", + (u64)dev_priv->vram_rblock_size << 12); +} + +static void +nvaa_vram_preinit(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* To our knowledge, there's no large scale reordering of pages + * that occurs on IGP chipsets. 
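+ * A single-page reorder block (the same 4KiB unit nv50_vram_preinit() + * computes in) is therefore enough.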
+ */ + dev_priv->vram_rblock_size = 1; +} + +static int nouveau_mem_detect(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -325,9 +388,18 @@ nouveau_mem_detect(struct drm_device *dev) dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; dev_priv->vram_size &= 0xffffffff00ll; - if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) { + + switch (dev_priv->chipset) { + case 0xaa: + case 0xac: + case 0xaf: dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); dev_priv->vram_sys_base <<= 12; + nvaa_vram_preinit(dev); + break; + default: + nv50_vram_preinit(dev); + break; } } else { dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; @@ -345,6 +417,33 @@ nouveau_mem_detect(struct drm_device *dev) return -ENOMEM; } +#if __OS_HAS_AGP +static unsigned long +get_agp_mode(struct drm_device *dev, unsigned long mode) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* + * FW seems to be broken on nv18, it makes the card lock up + * randomly. + */ + if (dev_priv->chipset == 0x18) + mode &= ~PCI_AGP_COMMAND_FW; + + /* + * AGP mode set in the command line. + */ + if (nouveau_agpmode > 0) { + bool agpv3 = mode & 0x8; + int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode; + + mode = (mode & ~0x7) | (rate & 0x7); + } + + return mode; +} +#endif + int nouveau_mem_reset_agp(struct drm_device *dev) { @@ -355,7 +454,8 @@ nouveau_mem_reset_agp(struct drm_device *dev) /* First of all, disable fast writes, otherwise if it's * already enabled in the AGP bridge and we disable the card's * AGP controller we might be locking ourselves out of it. */ - if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) { + if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) | + dev->agp->mode) & PCI_AGP_COMMAND_FW) { struct drm_agp_info info; struct drm_agp_mode mode; @@ -363,7 +463,7 @@ nouveau_mem_reset_agp(struct drm_device *dev) if (ret) return ret; - mode.mode = info.mode & ~PCI_AGP_COMMAND_FW; + mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW; ret = drm_agp_enable(dev, mode); if (ret) return ret; @@ -418,7 +518,7 @@ nouveau_mem_init_agp(struct drm_device *dev) } /* see agp.h for the AGPSTAT_* modes available */ - mode.mode = info.mode; + mode.mode = get_agp_mode(dev, info.mode); ret = drm_agp_enable(dev, mode); if (ret) { NV_ERROR(dev, "Unable to enable AGP: %d\n", ret); @@ -433,24 +533,27 @@ nouveau_mem_init_agp(struct drm_device *dev) } int -nouveau_mem_init(struct drm_device *dev) +nouveau_mem_vram_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; - int ret, dma_bits = 32; - - dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); - dev_priv->gart_info.type = NOUVEAU_GART_NONE; + int ret, dma_bits; if (dev_priv->card_type >= NV_50 && pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) dma_bits = 40; + else + dma_bits = 32; ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); - if (ret) { - NV_ERROR(dev, "Error setting DMA mask: %d\n", ret); + if (ret) return ret; - } + + ret = nouveau_mem_detect(dev); + if (ret) + return ret; + + dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); ret = nouveau_ttm_global_init(dev_priv); if (ret) @@ -465,8 +568,6 @@ nouveau_mem_init(struct drm_device *dev) return ret; } - spin_lock_init(&dev_priv->tile.lock); - dev_priv->fb_available_size = dev_priv->vram_size; dev_priv->fb_mappable_pages = dev_priv->fb_available_size; if (dev_priv->fb_mappable_pages > 
pci_resource_len(dev->pdev, 1)) @@ -474,7 +575,16 @@ nouveau_mem_init(struct drm_device *dev) pci_resource_len(dev->pdev, 1); dev_priv->fb_mappable_pages >>= PAGE_SHIFT; - /* remove reserved space at end of vram from available amount */ + /* reserve space at end of VRAM for PRAMIN */ + if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || + dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) + dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024); + else + if (dev_priv->card_type >= NV_40) + dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024); + else + dev_priv->ramin_rsvd_vram = (512 * 1024); + dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_aper_free = dev_priv->fb_available_size; @@ -495,9 +605,23 @@ nouveau_mem_init(struct drm_device *dev) nouveau_bo_ref(NULL, &dev_priv->vga_ram); } - /* GART */ + dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1), + DRM_MTRR_WC); + return 0; +} + +int +nouveau_mem_gart_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; + int ret; + + dev_priv->gart_info.type = NOUVEAU_GART_NONE; + #if !defined(__powerpc__) && !defined(__ia64__) - if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) { + if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) { ret = nouveau_mem_init_agp(dev); if (ret) NV_ERROR(dev, "Error initialising AGP: %d\n", ret); @@ -523,11 +647,150 @@ nouveau_mem_init(struct drm_device *dev) return ret; } - dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), - pci_resource_len(dev->pdev, 1), - DRM_MTRR_WC); - return 0; } +void +nouveau_mem_timing_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_memtimings *memtimings = &pm->memtimings; + struct nvbios *bios = &dev_priv->vbios; + struct bit_entry P; + u8 tUNK_0, tUNK_1, tUNK_2; + u8 tRP; /* Byte 3 */ + u8 tRAS; /* Byte 5 */ + u8 tRFC; /* Byte 7 */ + u8 tRC; /* Byte 9 */ + u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; + u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; + u8 *mem = NULL, *entry; + int i, recordlen, entries; + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version == 1) + mem = ROMPTR(bios, P.data[4]); + else + if (P.version == 2) + mem = ROMPTR(bios, P.data[8]); + else { + NV_WARN(dev, "unknown mem for BIT P %d\n", P.version); + } + } else { + NV_DEBUG(dev, "BMP version too old for memory\n"); + return; + } + + if (!mem) { + NV_DEBUG(dev, "memory timing table pointer invalid\n"); + return; + } + + if (mem[0] != 0x10) { + NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]); + return; + } + + /* validate record length */ + entries = mem[2]; + recordlen = mem[3]; + if (recordlen < 15) { + NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]); + return; + } + + /* parse vbios entries into common format */ + memtimings->timing = + kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); + if (!memtimings->timing) + return; + + entry = mem + mem[1]; + for (i = 0; i < entries; i++, entry += recordlen) { + struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; + if (entry[0] == 0) + continue; + + tUNK_18 = 1; + tUNK_19 = 1; + tUNK_20 = 0; + tUNK_21 = 0; + switch (MIN(recordlen,21)) { + case 21: + tUNK_21 = entry[21]; + case 20: + tUNK_20 = entry[20]; + case 19: + tUNK_19 = entry[19]; + case 18: + tUNK_18 = entry[18]; + default: + tUNK_0 
= entry[0]; + tUNK_1 = entry[1]; + tUNK_2 = entry[2]; + tRP = entry[3]; + tRAS = entry[5]; + tRFC = entry[7]; + tRC = entry[9]; + tUNK_10 = entry[10]; + tUNK_11 = entry[11]; + tUNK_12 = entry[12]; + tUNK_13 = entry[13]; + tUNK_14 = entry[14]; + break; + } + + timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP); + + /* XXX: I don't trust the -1's and +1's... they must come + * from somewhere! */ + timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 | + tUNK_18 << 16 | + (tUNK_1 + tUNK_19 + 1) << 8 | + (tUNK_2 - 1)); + + timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); + if(recordlen > 19) { + timing->reg_100228 += (tUNK_19 - 1) << 24; + } else { + timing->reg_100228 += tUNK_12 << 24; + } + + /* XXX: reg_10022c */ + + timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | + tUNK_13 << 8 | tUNK_13); + + /* XXX: +6? */ + timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); + if(tUNK_10 > tUNK_11) { + timing->reg_100234 += tUNK_10 << 16; + } else { + timing->reg_100234 += tUNK_11 << 16; + } + + /* XXX; reg_100238, reg_10023c */ + NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, + timing->reg_100220, timing->reg_100224, + timing->reg_100228, timing->reg_10022c); + NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", + timing->reg_100230, timing->reg_100234, + timing->reg_100238, timing->reg_10023c); + } + + memtimings->nr_timing = entries; + memtimings->supported = true; +} + +void +nouveau_mem_timing_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; + + kfree(mem->timing); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 3ec181ff50ce..2cc59f8c658b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -28,6 +28,7 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" int nouveau_notifier_init_channel(struct nouveau_channel *chan) @@ -112,7 +113,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, return -ENOMEM; } - offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT; + offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { target = NV_DMA_TARGET_VIDMEM; } else @@ -146,11 +147,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, nobj->dtor = nouveau_notifier_gpuobj_dtor; nobj->priv = mem; - ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); + ret = nouveau_ramht_insert(chan, handle, nobj); + nouveau_gpuobj_ref(NULL, &nobj); if (ret) { - nouveau_gpuobj_del(dev, &nobj); drm_mm_put_block(mem); - NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); + NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index b6bcb254f4ab..896cf8634144 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -34,6 +34,7 @@ #include "drm.h" #include "nouveau_drv.h" #include "nouveau_drm.h" +#include "nouveau_ramht.h" /* NVidia uses context objects to drive drawing operations. 
@@ -65,137 +66,6 @@ The key into the hash table depends on the object handle and channel id and is given as: */ -static uint32_t -nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t hash = 0; - int i; - - NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle); - - for (i = 32; i > 0; i -= dev_priv->ramht_bits) { - hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); - handle >>= dev_priv->ramht_bits; - } - - if (dev_priv->card_type < NV_50) - hash ^= channel << (dev_priv->ramht_bits - 4); - hash <<= 3; - - NV_DEBUG(dev, "hash=0x%08x\n", hash); - return hash; -} - -static int -nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, - uint32_t offset) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4); - - if (dev_priv->card_type < NV_40) - return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); - return (ctx != 0); -} - -static int -nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - struct nouveau_channel *chan = ref->channel; - struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; - uint32_t ctx, co, ho; - - if (!ramht) { - NV_ERROR(dev, "No hash table!\n"); - return -EINVAL; - } - - if (dev_priv->card_type < NV_40) { - ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | - (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); - } else - if (dev_priv->card_type < NV_50) { - ctx = (ref->instance >> 4) | - (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); - } else { - if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { - ctx = (ref->instance << 10) | 2; - } else { - ctx = (ref->instance >> 4) | - ((ref->gpuobj->engine << - NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); - } - } - - co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); - do { - if (!nouveau_ramht_entry_valid(dev, ramht, co)) { - NV_DEBUG(dev, - "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - chan->id, co, ref->handle, ctx); - nv_wo32(dev, ramht, (co + 0)/4, ref->handle); - nv_wo32(dev, ramht, (co + 4)/4, ctx); - - list_add_tail(&ref->list, &chan->ramht_refs); - instmem->flush(dev); - return 0; - } - NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", - chan->id, co, nv_ro32(dev, ramht, co/4)); - - co += 8; - if (co >= dev_priv->ramht_size) - co = 0; - } while (co != ho); - - NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); - return -ENOMEM; -} - -static void -nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - struct nouveau_channel *chan = ref->channel; - struct nouveau_gpuobj *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; - uint32_t co, ho; - - if (!ramht) { - NV_ERROR(dev, "No hash table!\n"); - return; - } - - co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); - do { - if (nouveau_ramht_entry_valid(dev, ramht, co) && - (ref->handle == nv_ro32(dev, ramht, (co/4)))) { - NV_DEBUG(dev, - "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - chan->id, co, ref->handle, - nv_ro32(dev, ramht, (co + 4))); - nv_wo32(dev, ramht, (co + 0)/4, 0x00000000); - nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); - - list_del(&ref->list); - instmem->flush(dev); - return; - } - - co += 8; - if (co >= dev_priv->ramht_size) - co = 0; - } while (co != ho); - list_del(&ref->list); - - NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", - chan->id, ref->handle); -} int nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, @@ -205,7 +75,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; struct nouveau_gpuobj *gpuobj; - struct drm_mm *pramin = NULL; + struct drm_mm_node *ramin = NULL; int ret; NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", @@ -218,88 +88,115 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, if (!gpuobj) return -ENOMEM; NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + gpuobj->dev = dev; gpuobj->flags = flags; - gpuobj->im_channel = chan; + kref_init(&gpuobj->refcount); + gpuobj->size = size; + spin_lock(&dev_priv->ramin_lock); list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); - /* Choose between global instmem heap, and per-channel private - * instmem heap. - */ if (chan) { NV_DEBUG(dev, "channel heap\n"); - pramin = &chan->ramin_heap; + + ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); + if (ramin) + ramin = drm_mm_get_block(ramin, size, align); + + if (!ramin) { + nouveau_gpuobj_ref(NULL, &gpuobj); + return -ENOMEM; + } } else { NV_DEBUG(dev, "global heap\n"); - pramin = &dev_priv->ramin_heap; + /* allocate backing pages, sets vinst */ ret = engine->instmem.populate(dev, gpuobj, &size); if (ret) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return ret; } + + /* try and get aperture space */ + do { + if (drm_mm_pre_get(&dev_priv->ramin_heap)) + return -ENOMEM; + + spin_lock(&dev_priv->ramin_lock); + ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, + align, 0); + if (ramin == NULL) { + spin_unlock(&dev_priv->ramin_lock); + nouveau_gpuobj_ref(NULL, &gpuobj); + return ret; + } + + ramin = drm_mm_get_block_atomic(ramin, size, align); + spin_unlock(&dev_priv->ramin_lock); + } while (ramin == NULL); + + /* on nv50 it's ok to fail, we have a fallback path */ + if (!ramin && dev_priv->card_type < NV_50) { + nouveau_gpuobj_ref(NULL, &gpuobj); + return -ENOMEM; + } } - /* Allocate a chunk of the PRAMIN aperture */ - gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0); - if (gpuobj->im_pramin) - gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); - - if (!gpuobj->im_pramin) { - nouveau_gpuobj_del(dev, &gpuobj); - return -ENOMEM; - } - - if (!chan) { + /* if we got a chunk of the aperture, map pages into it */ + gpuobj->im_pramin = ramin; + if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) { ret = engine->instmem.bind(dev, gpuobj); if (ret) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return ret; } } + /* calculate the various different addresses for the object */ + if (chan) { + gpuobj->pinst = chan->ramin->pinst; +
if (gpuobj->pinst != ~0) + gpuobj->pinst += gpuobj->im_pramin->start; + + if (dev_priv->card_type < NV_50) { + gpuobj->cinst = gpuobj->pinst; + } else { + gpuobj->cinst = gpuobj->im_pramin->start; + gpuobj->vinst = gpuobj->im_pramin->start + + chan->ramin->vinst; + } + } else { + if (gpuobj->im_pramin) + gpuobj->pinst = gpuobj->im_pramin->start; + else + gpuobj->pinst = ~0; + gpuobj->cinst = 0xdeadbeef; + } + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { int i; - for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0); engine->instmem.flush(dev); } + *gpuobj_ret = gpuobj; return 0; } -int -nouveau_gpuobj_early_init(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - NV_DEBUG(dev, "\n"); - - INIT_LIST_HEAD(&dev_priv->gpuobj_list); - - return 0; -} - int nouveau_gpuobj_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int ret; NV_DEBUG(dev, "\n"); - if (dev_priv->card_type < NV_50) { - ret = nouveau_gpuobj_new_fake(dev, - dev_priv->ramht_offset, ~0, dev_priv->ramht_size, - NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, - &dev_priv->ramht, NULL); - if (ret) - return ret; - } + INIT_LIST_HEAD(&dev_priv->gpuobj_list); + spin_lock_init(&dev_priv->ramin_lock); + dev_priv->ramin_base = ~0; return 0; } @@ -311,297 +208,89 @@ nouveau_gpuobj_takedown(struct drm_device *dev) NV_DEBUG(dev, "\n"); - nouveau_gpuobj_del(dev, &dev_priv->ramht); + BUG_ON(!list_empty(&dev_priv->gpuobj_list)); } -void -nouveau_gpuobj_late_takedown(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj = NULL; - struct list_head *entry, *tmp; - - NV_DEBUG(dev, "\n"); - - list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { - gpuobj = list_entry(entry, struct nouveau_gpuobj, list); - - NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", - gpuobj, gpuobj->refcount); - gpuobj->refcount = 0; - nouveau_gpuobj_del(dev, &gpuobj); - } -} - -int -nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) + +static void +nouveau_gpuobj_del(struct kref *ref) { + struct nouveau_gpuobj *gpuobj = + container_of(ref, struct nouveau_gpuobj, refcount); + struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; - struct nouveau_gpuobj *gpuobj; int i; - NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? 
*pgpuobj : NULL); - - if (!dev_priv || !pgpuobj || !(*pgpuobj)) - return -EINVAL; - gpuobj = *pgpuobj; - - if (gpuobj->refcount != 0) { - NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount); - return -EINVAL; - } + NV_DEBUG(dev, "gpuobj %p\n", gpuobj); if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { - for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0); engine->instmem.flush(dev); } if (gpuobj->dtor) gpuobj->dtor(dev, gpuobj); - if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE)) + if (gpuobj->im_backing) engine->instmem.clear(dev, gpuobj); - if (gpuobj->im_pramin) { - if (gpuobj->flags & NVOBJ_FLAG_FAKE) - kfree(gpuobj->im_pramin); - else - drm_mm_put_block(gpuobj->im_pramin); - } - + spin_lock(&dev_priv->ramin_lock); + if (gpuobj->im_pramin) + drm_mm_put_block(gpuobj->im_pramin); list_del(&gpuobj->list); + spin_unlock(&dev_priv->ramin_lock); - *pgpuobj = NULL; kfree(gpuobj); - return 0; } -static int -nouveau_gpuobj_instance_get(struct drm_device *dev, - struct nouveau_channel *chan, - struct nouveau_gpuobj *gpuobj, uint32_t *inst) +void +nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *cpramin; + if (ref) + kref_get(&ref->refcount); - /* card_type < NV_50) { - *inst = gpuobj->im_pramin->start; - return 0; - } + if (*ptr) + kref_put(&(*ptr)->refcount, nouveau_gpuobj_del); - if (chan && gpuobj->im_channel != chan) { - NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n", - gpuobj->im_channel->id, chan->id); - return -EINVAL; - } - - /* NV50 channel-local instance */ - if (chan) { - cpramin = chan->ramin->gpuobj; - *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; - return 0; - } - - /* NV50 global (VRAM) instance */ - if (!gpuobj->im_channel) { - /* ...from global heap */ - if (!gpuobj->im_backing) { - NV_ERROR(dev, "AII, no VRAM backing gpuobj\n"); - return -EINVAL; - } - *inst = gpuobj->im_backing_start; - return 0; - } else { - /* ...from local heap */ - cpramin = gpuobj->im_channel->ramin->gpuobj; - *inst = cpramin->im_backing_start + - (gpuobj->im_pramin->start - cpramin->im_pramin->start); - return 0; - } - - return -EINVAL; + *ptr = ref; } int -nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, - uint32_t handle, struct nouveau_gpuobj *gpuobj, - struct nouveau_gpuobj_ref **ref_ret) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj_ref *ref; - uint32_t instance; - int ret; - - NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n", - chan ? 
chan->id : -1, handle, gpuobj); - - if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) - return -EINVAL; - - if (!chan && !ref_ret) - return -EINVAL; - - if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) { - /* sw object */ - instance = 0x40; - } else { - ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); - if (ret) - return ret; - } - - ref = kzalloc(sizeof(*ref), GFP_KERNEL); - if (!ref) - return -ENOMEM; - INIT_LIST_HEAD(&ref->list); - ref->gpuobj = gpuobj; - ref->channel = chan; - ref->instance = instance; - - if (!ref_ret) { - ref->handle = handle; - - ret = nouveau_ramht_insert(dev, ref); - if (ret) { - kfree(ref); - return ret; - } - } else { - ref->handle = ~0; - *ref_ret = ref; - } - - ref->gpuobj->refcount++; - return 0; -} - -int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) -{ - struct nouveau_gpuobj_ref *ref; - - NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL); - - if (!dev || !pref || *pref == NULL) - return -EINVAL; - ref = *pref; - - if (ref->handle != ~0) - nouveau_ramht_remove(dev, ref); - - if (ref->gpuobj) { - ref->gpuobj->refcount--; - - if (ref->gpuobj->refcount == 0) { - if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) - nouveau_gpuobj_del(dev, &ref->gpuobj); - } - } - - *pref = NULL; - kfree(ref); - return 0; -} - -int -nouveau_gpuobj_new_ref(struct drm_device *dev, - struct nouveau_channel *oc, struct nouveau_channel *rc, - uint32_t handle, uint32_t size, int align, - uint32_t flags, struct nouveau_gpuobj_ref **ref) -{ - struct nouveau_gpuobj *gpuobj = NULL; - int ret; - - ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj); - if (ret) - return ret; - - ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref); - if (ret) { - nouveau_gpuobj_del(dev, &gpuobj); - return ret; - } - - return 0; -} - -int -nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, - struct nouveau_gpuobj_ref **ref_ret) -{ - struct nouveau_gpuobj_ref *ref; - struct list_head *entry, *tmp; - - list_for_each_safe(entry, tmp, &chan->ramht_refs) { - ref = list_entry(entry, struct nouveau_gpuobj_ref, list); - - if (ref->handle == handle) { - if (ref_ret) - *ref_ret = ref; - return 0; - } - } - - return -EINVAL; -} - -int -nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, - uint32_t b_offset, uint32_t size, - uint32_t flags, struct nouveau_gpuobj **pgpuobj, - struct nouveau_gpuobj_ref **pref) +nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, + u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj = NULL; int i; NV_DEBUG(dev, - "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", - p_offset, b_offset, size, flags); + "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n", + pinst, vinst, size, flags); gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) return -ENOMEM; NV_DEBUG(dev, "gpuobj %p\n", gpuobj); - gpuobj->im_channel = NULL; - gpuobj->flags = flags | NVOBJ_FLAG_FAKE; - - list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - - if (p_offset != ~0) { - gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), - GFP_KERNEL); - if (!gpuobj->im_pramin) { - nouveau_gpuobj_del(dev, &gpuobj); - return -ENOMEM; - } - gpuobj->im_pramin->start = p_offset; - gpuobj->im_pramin->size = size; - } - - if (b_offset != ~0) { - gpuobj->im_backing = (struct nouveau_bo *)-1; - gpuobj->im_backing_start = b_offset; - } + gpuobj->dev = dev; + gpuobj->flags = flags; + 
kref_init(&gpuobj->refcount); + gpuobj->size = size; + gpuobj->pinst = pinst; + gpuobj->cinst = 0xdeadbeef; + gpuobj->vinst = vinst; if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { - for (i = 0; i < gpuobj->im_pramin->size; i += 4) - nv_wo32(dev, gpuobj, i/4, 0); + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0); dev_priv->engine.instmem.flush(dev); } - if (pref) { - i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref); - if (i) { - nouveau_gpuobj_del(dev, &gpuobj); - return i; - } - } - - if (pgpuobj) - *pgpuobj = gpuobj; + spin_lock(&dev_priv->ramin_lock); + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); + *pgpuobj = gpuobj; return 0; } @@ -685,14 +374,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, adjust = offset & 0x00000fff; frame = offset & ~0x00000fff; - nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) | - (adjust << 20) | - (access << 14) | - (target << 16) | - class)); - nv_wo32(dev, *gpuobj, 1, size - 1); - nv_wo32(dev, *gpuobj, 2, frame | pte_flags); - nv_wo32(dev, *gpuobj, 3, frame | pte_flags); + nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) | + (access << 14) | (target << 16) | + class)); + nv_wo32(*gpuobj, 4, size - 1); + nv_wo32(*gpuobj, 8, frame | pte_flags); + nv_wo32(*gpuobj, 12, frame | pte_flags); } else { uint64_t limit = offset + size - 1; uint32_t flags0, flags5; @@ -705,12 +392,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, flags5 = 0x00080000; } - nv_wo32(dev, *gpuobj, 0, flags0 | class); - nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit)); - nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset)); - nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) | - (upper_32_bits(offset) & 0xff)); - nv_wo32(dev, *gpuobj, 5, flags5); + nv_wo32(*gpuobj, 0, flags0 | class); + nv_wo32(*gpuobj, 4, lower_32_bits(limit)); + nv_wo32(*gpuobj, 8, lower_32_bits(offset)); + nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) | + (upper_32_bits(offset) & 0xff)); + nv_wo32(*gpuobj, 20, flags5); } instmem->flush(dev); @@ -741,7 +428,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, *o_ret = 0; } else if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { - *gpuobj = dev_priv->gart_info.sg_ctxdma; + nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); if (offset & ~0xffffffffULL) { NV_ERROR(dev, "obj offset exceeds 32-bits\n"); return -EINVAL; @@ -829,25 +516,25 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, } if (dev_priv->card_type >= NV_50) { - nv_wo32(dev, *gpuobj, 0, class); - nv_wo32(dev, *gpuobj, 5, 0x00010000); + nv_wo32(*gpuobj, 0, class); + nv_wo32(*gpuobj, 20, 0x00010000); } else { switch (class) { case NV_CLASS_NULL: - nv_wo32(dev, *gpuobj, 0, 0x00001030); - nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF); + nv_wo32(*gpuobj, 0, 0x00001030); + nv_wo32(*gpuobj, 4, 0xFFFFFFFF); break; default: if (dev_priv->card_type >= NV_40) { - nv_wo32(dev, *gpuobj, 0, class); + nv_wo32(*gpuobj, 0, class); #ifdef __BIG_ENDIAN - nv_wo32(dev, *gpuobj, 2, 0x01000000); + nv_wo32(*gpuobj, 8, 0x01000000); #endif } else { #ifdef __BIG_ENDIAN - nv_wo32(dev, *gpuobj, 0, class | 0x00080000); + nv_wo32(*gpuobj, 0, class | 0x00080000); #else - nv_wo32(dev, *gpuobj, 0, class); + nv_wo32(*gpuobj, 0, class); #endif } } @@ -873,10 +560,15 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) return -ENOMEM; + gpuobj->dev = chan->dev; gpuobj->engine = NVOBJ_ENGINE_SW; gpuobj->class = class; + 
kref_init(&gpuobj->refcount); + gpuobj->cinst = 0x40; + spin_lock(&dev_priv->ramin_lock); list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); *gpuobj_ret = gpuobj; return 0; } @@ -886,7 +578,6 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *pramin = NULL; uint32_t size; uint32_t base; int ret; @@ -911,18 +602,16 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) size += 0x1000; } - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, - &chan->ramin); + ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); if (ret) { NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); return ret; } - pramin = chan->ramin->gpuobj; - ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size); + ret = drm_mm_init(&chan->ramin_heap, base, size); if (ret) { NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); - nouveau_gpuobj_ref_del(dev, &chan->ramin); + nouveau_gpuobj_ref(NULL, &chan->ramin); return ret; } @@ -939,8 +628,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_gpuobj *vram = NULL, *tt = NULL; int ret, i; - INIT_LIST_HEAD(&chan->ramht_refs); - NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); /* Allocate a chunk of memory for per-channel object storage */ @@ -956,41 +643,38 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, * locations determined during init. */ if (dev_priv->card_type >= NV_50) { - uint32_t vm_offset, pde; + u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; + u64 vm_vinst = chan->ramin->vinst + pgd_offs; + u32 vm_pinst = chan->ramin->pinst; + u32 pde; - vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 
0x1400 : 0x200; - vm_offset += chan->ramin->gpuobj->im_pramin->start; + if (vm_pinst != ~0) + vm_pinst += pgd_offs; - ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, - 0, &chan->vm_pd, NULL); + ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000, + 0, &chan->vm_pd); if (ret) return ret; for (i = 0; i < 0x4000; i += 8) { - nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); - nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); + nv_wo32(chan->vm_pd, i + 0, 0x00000000); + nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); } - pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2; - ret = nouveau_gpuobj_ref_add(dev, NULL, 0, - dev_priv->gart_info.sg_ctxdma, - &chan->vm_gart_pt); - if (ret) - return ret; - nv_wo32(dev, chan->vm_pd, pde++, - chan->vm_gart_pt->instance | 0x03); - nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); + nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, + &chan->vm_gart_pt); + pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; + nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3); + nv_wo32(chan->vm_pd, pde + 4, 0x00000000); - pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2; + pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - ret = nouveau_gpuobj_ref_add(dev, NULL, 0, - dev_priv->vm_vram_pt[i], - &chan->vm_vram_pt[i]); - if (ret) - return ret; + nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i], + &chan->vm_vram_pt[i]); - nv_wo32(dev, chan->vm_pd, pde++, - chan->vm_vram_pt[i]->instance | 0x61); - nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); + nv_wo32(chan->vm_pd, pde + 0, + chan->vm_vram_pt[i]->vinst | 0x61); + nv_wo32(chan->vm_pd, pde + 4, 0x00000000); + pde += 8; } instmem->flush(dev); @@ -998,15 +682,17 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, /* RAMHT */ if (dev_priv->card_type < NV_50) { - ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, - &chan->ramht); + nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL); + } else { + struct nouveau_gpuobj *ramht = NULL; + + ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16, + NVOBJ_FLAG_ZERO_ALLOC, &ramht); if (ret) return ret; - } else { - ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, - 0x8000, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &chan->ramht); + + ret = nouveau_ramht_new(dev, ramht, &chan->ramht); + nouveau_gpuobj_ref(NULL, &ramht); if (ret) return ret; } @@ -1023,24 +709,32 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, } } else { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->fb_available_size, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_VIDMEM, &vram); + 0, dev_priv->fb_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_VIDMEM, &vram); if (ret) { NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); return ret; } } - ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL); + ret = nouveau_ramht_insert(chan, vram_h, vram); + nouveau_gpuobj_ref(NULL, &vram); if (ret) { - NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret); + NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret); return ret; } /* TT memory ctxdma */ if (dev_priv->card_type >= NV_50) { - tt = vram; + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->vm_end, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_AGP, &tt); + if (ret) { + NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret); + return ret; + } } else if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { ret = nouveau_gpuobj_gart_dma_new(chan, 0, @@ -1056,9 +750,10 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, return ret; } - ret =
nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); + ret = nouveau_ramht_insert(chan, tt_h, tt); + nouveau_gpuobj_ref(NULL, &tt); if (ret) { - NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret); + NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret); return ret; } @@ -1070,33 +765,23 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) { struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; - struct list_head *entry, *tmp; - struct nouveau_gpuobj_ref *ref; int i; NV_DEBUG(dev, "ch%d\n", chan->id); - if (!chan->ramht_refs.next) + if (!chan->ramht) return; - list_for_each_safe(entry, tmp, &chan->ramht_refs) { - ref = list_entry(entry, struct nouveau_gpuobj_ref, list); + nouveau_ramht_ref(NULL, &chan->ramht, chan); - nouveau_gpuobj_ref_del(dev, &ref); - } - - nouveau_gpuobj_ref_del(dev, &chan->ramht); - - nouveau_gpuobj_del(dev, &chan->vm_pd); - nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); + nouveau_gpuobj_ref(NULL, &chan->vm_pd); + nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) - nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); + nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); if (chan->ramin_heap.free_stack.next) drm_mm_takedown(&chan->ramin_heap); - if (chan->ramin) - nouveau_gpuobj_ref_del(dev, &chan->ramin); - + nouveau_gpuobj_ref(NULL, &chan->ramin); } int @@ -1117,17 +802,17 @@ nouveau_gpuobj_suspend(struct drm_device *dev) } list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE)) + if (!gpuobj->im_backing) continue; - gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size); + gpuobj->im_backing_suspend = vmalloc(gpuobj->size); if (!gpuobj->im_backing_suspend) { nouveau_gpuobj_resume(dev); return -ENOMEM; } - for (i = 0; i < gpuobj->im_pramin->size / 4; i++) - gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); + for (i = 0; i < gpuobj->size; i += 4) + gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); } return 0; @@ -1172,8 +857,8 @@ nouveau_gpuobj_resume(struct drm_device *dev) if (!gpuobj->im_backing_suspend) continue; - for (i = 0; i < gpuobj->im_pramin->size / 4; i++) - nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); dev_priv->engine.instmem.flush(dev); } @@ -1208,25 +893,24 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, return -EPERM; } - if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) + if (nouveau_ramht_find(chan, init->handle)) return -EEXIST; if (!grc->software) ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); else ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); - if (ret) { NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); return ret; } - ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL); + ret = nouveau_ramht_insert(chan, init->handle, gr); + nouveau_gpuobj_ref(NULL, &gr); if (ret) { NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); - nouveau_gpuobj_del(dev, &gr); return ret; } @@ -1237,16 +921,62 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_gpuobj_free *objfree = data; - struct nouveau_gpuobj_ref *ref; + struct nouveau_gpuobj *gpuobj; struct nouveau_channel *chan; - int ret; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); - ret = 
nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); - if (ret) - return ret; - nouveau_gpuobj_ref_del(dev, &ref); + gpuobj = nouveau_ramht_find(chan, objfree->handle); + if (!gpuobj) + return -ENOENT; + nouveau_ramht_remove(chan, objfree->handle); return 0; } + +u32 +nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) +{ + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct drm_device *dev = gpuobj->dev; + + if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { + u64 ptr = gpuobj->vinst + offset; + u32 base = ptr >> 16; + u32 val; + + spin_lock(&dev_priv->ramin_lock); + if (dev_priv->ramin_base != base) { + dev_priv->ramin_base = base; + nv_wr32(dev, 0x001700, dev_priv->ramin_base); + } + val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); + spin_unlock(&dev_priv->ramin_lock); + return val; + } + + return nv_ri32(dev, gpuobj->pinst + offset); +} + +void +nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val) +{ + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct drm_device *dev = gpuobj->dev; + + if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { + u64 ptr = gpuobj->vinst + offset; + u32 base = ptr >> 16; + + spin_lock(&dev_priv->ramin_lock); + if (dev_priv->ramin_base != base) { + dev_priv->ramin_base = base; + nv_wr32(dev, 0x001700, dev_priv->ramin_base); + } + nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); + spin_unlock(&dev_priv->ramin_lock); + return; + } + + nv_wi32(dev, gpuobj->pinst + offset, val); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c new file mode 100644 index 000000000000..ac62a1b8c4fc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -0,0 +1,205 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +static void +legacy_perf_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + char *perf, *entry, *bmp = &bios->data[bios->offset]; + int headerlen, use_straps; + + if (bmp[5] < 0x5 || bmp[6] < 0x14) { + NV_DEBUG(dev, "BMP version too old for perf\n"); + return; + } + + perf = ROMPTR(bios, bmp[0x73]); + if (!perf) { + NV_DEBUG(dev, "No memclock table pointer found.\n"); + return; + } + + switch (perf[0]) { + case 0x12: + case 0x14: + case 0x18: + use_straps = 0; + headerlen = 1; + break; + case 0x01: + use_straps = perf[1] & 1; + headerlen = (use_straps ? 8 : 2); + break; + default: + NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]); + return; + } + + entry = perf + headerlen; + if (use_straps) + entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1; + + sprintf(pm->perflvl[0].name, "performance_level_0"); + pm->perflvl[0].memory = ROM16(entry[0]) * 20; + pm->nr_perflvl = 1; +} + +void +nouveau_perf_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nvbios *bios = &dev_priv->vbios; + struct bit_entry P; + u8 version, headerlen, recordlen, entries; + u8 *perf, *entry; + int vid, i; + + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version != 1 && P.version != 2) { + NV_WARN(dev, "unknown perf for BIT P %d\n", P.version); + return; + } + + perf = ROMPTR(bios, P.data[0]); + version = perf[0]; + headerlen = perf[1]; + if (version < 0x40) { + recordlen = perf[3] + (perf[4] * perf[5]); + entries = perf[2]; + } else { + recordlen = perf[2] + (perf[3] * perf[4]); + entries = perf[5]; + } + } else { + if (bios->data[bios->offset + 6] < 0x25) { + legacy_perf_init(dev); + return; + } + + perf = ROMPTR(bios, bios->data[bios->offset + 0x94]); + if (!perf) { + NV_DEBUG(dev, "perf table pointer invalid\n"); + return; + } + + version = perf[1]; + headerlen = perf[0]; + recordlen = perf[3]; + entries = perf[2]; + } + + entry = perf + headerlen; + for (i = 0; i < entries; i++) { + struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; + + if (entry[0] == 0xff) { + entry += recordlen; + continue; + } + + switch (version) { + case 0x12: + case 0x13: + case 0x15: + perflvl->fanspeed = entry[55]; + perflvl->voltage = entry[56]; + perflvl->core = ROM32(entry[1]) * 10; + perflvl->memory = ROM32(entry[5]) * 20; + break; + case 0x21: + case 0x23: + case 0x24: + perflvl->fanspeed = entry[4]; + perflvl->voltage = entry[5]; + perflvl->core = ROM16(entry[6]) * 1000; + + if (dev_priv->chipset == 0x49 || + dev_priv->chipset == 0x4b) + perflvl->memory = ROM16(entry[11]) * 1000; + else + perflvl->memory = ROM16(entry[11]) * 2000; + + break; + case 0x25: + perflvl->fanspeed = entry[4]; + perflvl->voltage = entry[5]; + perflvl->core = ROM16(entry[6]) * 1000; + perflvl->shader = ROM16(entry[10]) * 1000; + perflvl->memory = ROM16(entry[12]) * 1000; + break; + case 0x30: + perflvl->memscript = ROM16(entry[2]); + case 0x35: + perflvl->fanspeed = entry[6]; + perflvl->voltage = entry[7]; + perflvl->core = ROM16(entry[8]) * 1000; + perflvl->shader = ROM16(entry[10]) * 1000; + perflvl->memory = ROM16(entry[12]) * 1000; + /*XXX: confirm on 0x35 */ + perflvl->unk05 = ROM16(entry[16]) * 1000; + break; + case 0x40: +#define 
subent(n) entry[perf[2] + ((n) * perf[3])] + perflvl->fanspeed = 0; /*XXX*/ + perflvl->voltage = entry[2]; + perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000; + perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000; + perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000; + break; + } + + /* make sure vid is valid */ + if (pm->voltage.supported && perflvl->voltage) { + vid = nouveau_volt_vid_lookup(dev, perflvl->voltage); + if (vid < 0) { + NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i); + entry += recordlen; + continue; + } + } + + snprintf(perflvl->name, sizeof(perflvl->name), + "performance_level_%d", i); + perflvl->id = i; + pm->nr_perflvl++; + + entry += recordlen; + } +} + +void +nouveau_perf_fini(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c new file mode 100644 index 000000000000..1c99c55d6d46 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -0,0 +1,518 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +#include +#include + +static int +nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl, + u8 id, u32 khz) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + void *pre_state; + + if (khz == 0) + return 0; + + pre_state = pm->clock_pre(dev, perflvl, id, khz); + if (IS_ERR(pre_state)) + return PTR_ERR(pre_state); + + if (pre_state) + pm->clock_set(dev, pre_state); + return 0; +} + +static int +nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + int ret; + + if (perflvl == pm->cur) + return 0; + + if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) { + ret = pm->voltage_set(dev, perflvl->voltage); + if (ret) { + NV_ERROR(dev, "voltage_set %d failed: %d\n", + perflvl->voltage, ret); + } + } + + nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core); + nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader); + nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory); + nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05); + + pm->cur = perflvl; + return 0; +} + +static int +nouveau_pm_profile_set(struct drm_device *dev, const char *profile) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_level *perflvl = NULL; + + /* safety precaution, for now */ + if (nouveau_perflvl_wr != 7777) + return -EPERM; + + if (!pm->clock_set) + return -EINVAL; + + if (!strncmp(profile, "boot", 4)) + perflvl = &pm->boot; + else { + int pl = simple_strtol(profile, NULL, 10); + int i; + + for (i = 0; i < pm->nr_perflvl; i++) { + if (pm->perflvl[i].id == pl) { + perflvl = &pm->perflvl[i]; + break; + } + } + + if (!perflvl) + return -EINVAL; + } + + NV_INFO(dev, "setting performance level: %s\n", profile); + return nouveau_pm_perflvl_set(dev, perflvl); +} + +static int +nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + int ret; + + if (!pm->clock_get) + return -EINVAL; + + memset(perflvl, 0, sizeof(*perflvl)); + + ret = pm->clock_get(dev, PLL_CORE); + if (ret > 0) + perflvl->core = ret; + + ret = pm->clock_get(dev, PLL_MEMORY); + if (ret > 0) + perflvl->memory = ret; + + ret = pm->clock_get(dev, PLL_SHADER); + if (ret > 0) + perflvl->shader = ret; + + ret = pm->clock_get(dev, PLL_UNK05); + if (ret > 0) + perflvl->unk05 = ret; + + if (pm->voltage.supported && pm->voltage_get) { + ret = pm->voltage_get(dev); + if (ret > 0) + perflvl->voltage = ret; + } + + return 0; +} + +static void +nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len) +{ + char c[16], s[16], v[16], f[16]; + + c[0] = '\0'; + if (perflvl->core) + snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000); + + s[0] = '\0'; + if (perflvl->shader) + snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000); + + v[0] = '\0'; + if (perflvl->voltage) + snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10); + + f[0] = '\0'; + if (perflvl->fanspeed) + snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed); + + snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000, + c, s, v, f); +} + 
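For orientation: nouveau_pm_perflvl_info() above builds the per-level strings that user space reads back through the sysfs attributes registered further down in this file. A minimal userspace sketch, not part of the patch, that exercises the performance_level attribute; the PCI address is a placeholder, and writes only take effect when the driver is loaded with the nouveau_perflvl_wr=7777 safety override checked in nouveau_pm_profile_set():

#include <stdio.h>

int main(void)
{
    /* node created by device_create_file() below; 0000:01:00.0 is
     * an example address, not a fixed path */
    const char *attr =
        "/sys/bus/pci/devices/0000:01:00.0/performance_level";
    char line[256];
    FILE *f = fopen(attr, "r");

    if (!f)
        return 1;

    /* prints "setting: ..." plus the current level as formatted by
     * nouveau_pm_perflvl_info(); the individual levels live in
     * separate performance_level_<id> files */
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);

    /* writing "boot" or a level id routes to nouveau_pm_profile_set() */
    f = fopen(attr, "w");
    if (f) {
        fputs("boot", f);
        fclose(f);
    }
    return 0;
}
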
+static ssize_t +nouveau_pm_get_perflvl_info(struct device *d, + struct device_attribute *a, char *buf) +{ + struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a; + char *ptr = buf; + int len = PAGE_SIZE; + + snprintf(ptr, len, "%d: ", perflvl->id); + ptr += strlen(buf); + len -= strlen(buf); + + nouveau_pm_perflvl_info(perflvl, ptr, len); + return strlen(buf); +} + +static ssize_t +nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_level cur; + int len = PAGE_SIZE, ret; + char *ptr = buf; + + if (!pm->cur) + snprintf(ptr, len, "setting: boot\n"); + else if (pm->cur == &pm->boot) + snprintf(ptr, len, "setting: boot\nc: "); + else + snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id); + ptr += strlen(buf); + len -= strlen(buf); + + ret = nouveau_pm_perflvl_get(dev, &cur); + if (ret == 0) + nouveau_pm_perflvl_info(&cur, ptr, len); + return strlen(buf); +} + +static ssize_t +nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); + int ret; + + ret = nouveau_pm_profile_set(dev, buf); + if (ret) + return ret; + return strlen(buf); +} + +static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR, + nouveau_pm_get_perflvl, nouveau_pm_set_perflvl); + +static int +nouveau_sysfs_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct device *d = &dev->pdev->dev; + int ret, i; + + ret = device_create_file(d, &dev_attr_performance_level); + if (ret) + return ret; + + for (i = 0; i < pm->nr_perflvl; i++) { + struct nouveau_pm_level *perflvl = &pm->perflvl[i]; + + perflvl->dev_attr.attr.name = perflvl->name; + perflvl->dev_attr.attr.mode = S_IRUGO; + perflvl->dev_attr.show = nouveau_pm_get_perflvl_info; + perflvl->dev_attr.store = NULL; + sysfs_attr_init(&perflvl->dev_attr.attr); + + ret = device_create_file(d, &perflvl->dev_attr); + if (ret) { + NV_ERROR(dev, "failed perflvl %d sysfs: %d\n", + perflvl->id, ret); + perflvl->dev_attr.attr.name = NULL; + nouveau_pm_fini(dev); + return ret; + } + } + + return 0; +} + +static void +nouveau_sysfs_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct device *d = &dev->pdev->dev; + int i; + + device_remove_file(d, &dev_attr_performance_level); + for (i = 0; i < pm->nr_perflvl; i++) { + struct nouveau_pm_level *pl = &pm->perflvl[i]; + + if (!pl->dev_attr.attr.name) + break; + + device_remove_file(d, &pl->dev_attr); + } +} + +static ssize_t +nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + + return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000); +} +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, + NULL, 0); + +static ssize_t +nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct
nouveau_pm_threshold_temp *temp = &pm->threshold_temp; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000); +} +static ssize_t +nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, + const char *buf, size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; + long value; + + if (strict_strtol(buf, 10, &value) == -EINVAL) + return count; + + temp->down_clock = value/1000; + + nouveau_temp_safety_checks(dev); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp, + nouveau_hwmon_set_max_temp, + 0); + +static ssize_t +nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, + char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000); +} +static ssize_t +nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, + const char *buf, + size_t count) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp; + long value; + + if (strict_strtol(buf, 10, &value) == -EINVAL) + return count; + + temp->critical = value/1000; + + nouveau_temp_safety_checks(dev); + + return count; +} +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, + nouveau_hwmon_critical_temp, + nouveau_hwmon_set_critical_temp, + 0); + +static ssize_t nouveau_hwmon_show_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "nouveau\n"); +} +static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0); + +static ssize_t nouveau_hwmon_show_update_rate(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "1000\n"); +} +static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO, + nouveau_hwmon_show_update_rate, + NULL, 0); + +static struct attribute *hwmon_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_update_rate.dev_attr.attr, + NULL +}; + +static const struct attribute_group hwmon_attrgroup = { + .attrs = hwmon_attributes, +}; + +static int +nouveau_hwmon_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct device *hwmon_dev; + int ret; + + if (!pm->temp_get) + return -ENODEV; + + hwmon_dev = hwmon_device_register(&dev->pdev->dev); + if (IS_ERR(hwmon_dev)) { + ret = PTR_ERR(hwmon_dev); + NV_ERROR(dev, + "Unable to register hwmon device: %d\n", ret); + return ret; + } + dev_set_drvdata(hwmon_dev, dev); + ret = sysfs_create_group(&hwmon_dev->kobj, + &hwmon_attrgroup); + if (ret) { + NV_ERROR(dev, + "Unable to create hwmon sysfs file: %d\n", ret); + hwmon_device_unregister(hwmon_dev); + return ret; + } + + pm->hwmon = hwmon_dev; + + return 0; +} + +static void +nouveau_hwmon_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct 
nouveau_pm_engine *pm = &dev_priv->engine.pm; + + if (pm->hwmon) { + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); + hwmon_device_unregister(pm->hwmon); + } +} + +int +nouveau_pm_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + char info[256]; + int ret, i; + + nouveau_volt_init(dev); + nouveau_perf_init(dev); + nouveau_temp_init(dev); + nouveau_mem_timing_init(dev); + + NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); + for (i = 0; i < pm->nr_perflvl; i++) { + nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); + NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info); + } + + /* determine current ("boot") performance level */ + ret = nouveau_pm_perflvl_get(dev, &pm->boot); + if (ret == 0) { + pm->cur = &pm->boot; + + nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); + NV_INFO(dev, "c: %s", info); + } + + /* switch performance levels now if requested */ + if (nouveau_perflvl != NULL) { + ret = nouveau_pm_profile_set(dev, nouveau_perflvl); + if (ret) { + NV_ERROR(dev, "error setting perflvl \"%s\": %d\n", + nouveau_perflvl, ret); + } + } + + nouveau_sysfs_init(dev); + nouveau_hwmon_init(dev); + + return 0; +} + +void +nouveau_pm_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + + if (pm->cur != &pm->boot) + nouveau_pm_perflvl_set(dev, &pm->boot); + + nouveau_mem_timing_fini(dev); + nouveau_temp_fini(dev); + nouveau_perf_fini(dev); + nouveau_volt_fini(dev); + + nouveau_hwmon_fini(dev); + nouveau_sysfs_fini(dev); +} + +void +nouveau_pm_resume(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_level *perflvl; + + if (pm->cur == &pm->boot) + return; + + perflvl = pm->cur; + pm->cur = &pm->boot; + nouveau_pm_perflvl_set(dev, perflvl); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h new file mode 100644 index 000000000000..4a9838ddacec --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_pm.h @@ -0,0 +1,74 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_PM_H__ +#define __NOUVEAU_PM_H__ + +/* nouveau_pm.c */ +int nouveau_pm_init(struct drm_device *dev); +void nouveau_pm_fini(struct drm_device *dev); +void nouveau_pm_resume(struct drm_device *dev); + +/* nouveau_volt.c */ +void nouveau_volt_init(struct drm_device *); +void nouveau_volt_fini(struct drm_device *); +int nouveau_volt_vid_lookup(struct drm_device *, int voltage); +int nouveau_volt_lvl_lookup(struct drm_device *, int vid); +int nouveau_voltage_gpio_get(struct drm_device *); +int nouveau_voltage_gpio_set(struct drm_device *, int voltage); + +/* nouveau_perf.c */ +void nouveau_perf_init(struct drm_device *); +void nouveau_perf_fini(struct drm_device *); + +/* nouveau_mem.c */ +void nouveau_mem_timing_init(struct drm_device *); +void nouveau_mem_timing_fini(struct drm_device *); + +/* nv04_pm.c */ +int nv04_pm_clock_get(struct drm_device *, u32 id); +void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, + u32 id, int khz); +void nv04_pm_clock_set(struct drm_device *, void *); + +/* nv50_pm.c */ +int nv50_pm_clock_get(struct drm_device *, u32 id); +void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, + u32 id, int khz); +void nv50_pm_clock_set(struct drm_device *, void *); + +/* nva3_pm.c */ +int nva3_pm_clock_get(struct drm_device *, u32 id); +void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *, + u32 id, int khz); +void nva3_pm_clock_set(struct drm_device *, void *); + +/* nouveau_temp.c */ +void nouveau_temp_init(struct drm_device *dev); +void nouveau_temp_fini(struct drm_device *dev); +void nouveau_temp_safety_checks(struct drm_device *dev); +int nv40_temp_get(struct drm_device *dev); +int nv84_temp_get(struct drm_device *dev); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c new file mode 100644 index 000000000000..7f16697cc96c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -0,0 +1,289 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_ramht.h" + +static u32 +nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_ramht *ramht = chan->ramht; + u32 hash = 0; + int i; + + NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle); + + for (i = 32; i > 0; i -= ramht->bits) { + hash ^= (handle & ((1 << ramht->bits) - 1)); + handle >>= ramht->bits; + } + + if (dev_priv->card_type < NV_50) + hash ^= chan->id << (ramht->bits - 4); + hash <<= 3; + + NV_DEBUG(dev, "hash=0x%08x\n", hash); + return hash; +} + +static int +nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, + u32 offset) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 ctx = nv_ro32(ramht, offset + 4); + + if (dev_priv->card_type < NV_40) + return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); + return (ctx != 0); +} + +static int +nouveau_ramht_entry_same_channel(struct nouveau_channel *chan, + struct nouveau_gpuobj *ramht, u32 offset) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + u32 ctx = nv_ro32(ramht, offset + 4); + + if (dev_priv->card_type >= NV_50) + return true; + else if (dev_priv->card_type >= NV_40) + return chan->id == + ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f); + else + return chan->id == + ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f); +} + +int +nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, + struct nouveau_gpuobj *gpuobj) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_ramht_entry *entry; + struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; + unsigned long flags; + u32 ctx, co, ho; + + if (nouveau_ramht_find(chan, handle)) + return -EEXIST; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + entry->channel = chan; + entry->gpuobj = NULL; + entry->handle = handle; + nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); + + if (dev_priv->card_type < NV_40) { + ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) | + (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + } else + if (dev_priv->card_type < NV_50) { + ctx = (gpuobj->cinst >> 4) | + (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } else { + if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { + ctx = (gpuobj->cinst << 10) | 2; + } else { + ctx = (gpuobj->cinst >> 4) | + ((gpuobj->engine << + NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); + } + } + + spin_lock_irqsave(&chan->ramht->lock, flags); + list_add(&entry->head, &chan->ramht->entries); + + co = ho = nouveau_ramht_hash_handle(chan, handle); + do { + if (!nouveau_ramht_entry_valid(dev, ramht, co)) { + NV_DEBUG(dev, + "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + chan->id, co, handle, ctx); + nv_wo32(ramht, co + 0, handle); + nv_wo32(ramht, co + 4, ctx); + + spin_unlock_irqrestore(&chan->ramht->lock, flags); + instmem->flush(dev); + return 0; + } + NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", + chan->id, co, nv_ro32(ramht, co)); + + co += 8; + if (co >= ramht->size) + co = 0; + } while (co != ho); + + NV_ERROR(dev, "RAMHT space exhausted. 
ch=%d\n", chan->id); + list_del(&entry->head); + spin_unlock_irqrestore(&chan->ramht->lock, flags); + kfree(entry); + return -ENOMEM; +} + +static void +nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; + struct nouveau_ramht_entry *entry, *tmp; + u32 co, ho; + + list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) { + if (entry->channel != chan || entry->handle != handle) + continue; + + nouveau_gpuobj_ref(NULL, &entry->gpuobj); + list_del(&entry->head); + kfree(entry); + break; + } + + co = ho = nouveau_ramht_hash_handle(chan, handle); + do { + if (nouveau_ramht_entry_valid(dev, ramht, co) && + nouveau_ramht_entry_same_channel(chan, ramht, co) && + (handle == nv_ro32(ramht, co))) { + NV_DEBUG(dev, + "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + chan->id, co, handle, nv_ro32(ramht, co + 4)); + nv_wo32(ramht, co + 0, 0x00000000); + nv_wo32(ramht, co + 4, 0x00000000); + instmem->flush(dev); + return; + } + + co += 8; + if (co >= ramht->size) + co = 0; + } while (co != ho); + + NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", + chan->id, handle); +} + +void +nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) +{ + struct nouveau_ramht *ramht = chan->ramht; + unsigned long flags; + + spin_lock_irqsave(&ramht->lock, flags); + nouveau_ramht_remove_locked(chan, handle); + spin_unlock_irqrestore(&ramht->lock, flags); +} + +struct nouveau_gpuobj * +nouveau_ramht_find(struct nouveau_channel *chan, u32 handle) +{ + struct nouveau_ramht *ramht = chan->ramht; + struct nouveau_ramht_entry *entry; + struct nouveau_gpuobj *gpuobj = NULL; + unsigned long flags; + + if (unlikely(!chan->ramht)) + return NULL; + + spin_lock_irqsave(&ramht->lock, flags); + list_for_each_entry(entry, &chan->ramht->entries, head) { + if (entry->channel == chan && entry->handle == handle) { + gpuobj = entry->gpuobj; + break; + } + } + spin_unlock_irqrestore(&ramht->lock, flags); + + return gpuobj; +} + +int +nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, + struct nouveau_ramht **pramht) +{ + struct nouveau_ramht *ramht; + + ramht = kzalloc(sizeof(*ramht), GFP_KERNEL); + if (!ramht) + return -ENOMEM; + + ramht->dev = dev; + kref_init(&ramht->refcount); + ramht->bits = drm_order(gpuobj->size / 8); + INIT_LIST_HEAD(&ramht->entries); + spin_lock_init(&ramht->lock); + nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj); + + *pramht = ramht; + return 0; +} + +static void +nouveau_ramht_del(struct kref *ref) +{ + struct nouveau_ramht *ramht = + container_of(ref, struct nouveau_ramht, refcount); + + nouveau_gpuobj_ref(NULL, &ramht->gpuobj); + kfree(ramht); +} + +void +nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, + struct nouveau_channel *chan) +{ + struct nouveau_ramht_entry *entry, *tmp; + struct nouveau_ramht *ramht; + unsigned long flags; + + if (ref) + kref_get(&ref->refcount); + + ramht = *ptr; + if (ramht) { + spin_lock_irqsave(&ramht->lock, flags); + list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { + if (entry->channel != chan) + continue; + + nouveau_ramht_remove_locked(chan, entry->handle); + } + spin_unlock_irqrestore(&ramht->lock, flags); + + kref_put(&ramht->refcount, nouveau_ramht_del); + } + *ptr = ref; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h 
b/drivers/gpu/drm/nouveau/nouveau_ramht.h new file mode 100644 index 000000000000..b79cb5e1a8f1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h @@ -0,0 +1,55 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_RAMHT_H__ +#define __NOUVEAU_RAMHT_H__ + +struct nouveau_ramht_entry { + struct list_head head; + struct nouveau_channel *channel; + struct nouveau_gpuobj *gpuobj; + u32 handle; +}; + +struct nouveau_ramht { + struct drm_device *dev; + struct kref refcount; + spinlock_t lock; + struct nouveau_gpuobj *gpuobj; + struct list_head entries; + int bits; +}; + +extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *, + struct nouveau_ramht **); +extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **, + struct nouveau_channel *unref_channel); + +extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, + struct nouveau_gpuobj *); +extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle); +extern struct nouveau_gpuobj * +nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 21a6e453b975..1b42541ca9e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -551,6 +551,8 @@ #define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C #define NV03_PFIFO_CACHE1_PULL0 0x00003240 #define NV04_PFIFO_CACHE1_PULL0 0x00003250 +# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010 +# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000 #define NV03_PFIFO_CACHE1_PULL1 0x00003250 #define NV04_PFIFO_CACHE1_PULL1 0x00003254 #define NV04_PFIFO_CACHE1_HASH 0x00003258 @@ -785,15 +787,12 @@ #define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8) #define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8) #define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8) +#define NV50_PDISPLAY_EXT_MODE_CTRL_P(i) (0x00610b80 + (i) * 0x8) +#define NV50_PDISPLAY_EXT_MODE_CTRL_C(i) (0x00610b84 + (i) * 0x8) #define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8) #define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8) - #define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8) #define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8) -#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8) -#define 
NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8) -#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8) -#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8) #define NV50_PDISPLAY_CRTC_CLK 0x00614000 #define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100) diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 6b9187d7f67d..288bacac7e5a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; unsigned i, j, pte; - NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start); + NV_DEBUG(dev, "pg=0x%lx\n", mem->start); - pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT); + pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT); nvbe->pte_start = pte; for (i = 0; i < nvbe->nr_pages; i++) { dma_addr_t dma_offset = nvbe->pages[i]; @@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) uint32_t offset_h = upper_32_bits(dma_offset); for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { - if (dev_priv->card_type < NV_50) - nv_wo32(dev, gpuobj, pte++, offset_l | 3); - else { - nv_wo32(dev, gpuobj, pte++, offset_l | 0x21); - nv_wo32(dev, gpuobj, pte++, offset_h & 0xff); + if (dev_priv->card_type < NV_50) { + nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); + pte += 1; + } else { + nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21); + nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff); + pte += 2; } dma_offset += NV_CTXDMA_PAGE_SIZE; @@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be) dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { - if (dev_priv->card_type < NV_50) - nv_wo32(dev, gpuobj, pte++, dma_offset | 3); - else { - nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21); - nv_wo32(dev, gpuobj, pte++, 0x00000000); + if (dev_priv->card_type < NV_50) { + nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3); + pte += 1; + } else { + nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); + nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000); + pte += 2; } dma_offset += NV_CTXDMA_PAGE_SIZE; @@ -230,7 +234,6 @@ nouveau_sgdma_init(struct drm_device *dev) } ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, - NVOBJ_FLAG_ALLOW_NO_REFS | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &gpuobj); if (ret) { @@ -239,9 +242,9 @@ nouveau_sgdma_init(struct drm_device *dev) } dev_priv->gart_info.sg_dummy_page = - alloc_page(GFP_KERNEL|__GFP_DMA32); + alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO); if (!dev_priv->gart_info.sg_dummy_page) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return -ENOMEM; } @@ -250,29 +253,34 @@ nouveau_sgdma_init(struct drm_device *dev) pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { - nouveau_gpuobj_del(dev, &gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); return -EFAULT; } if (dev_priv->card_type < NV_50) { + /* special case, allocated from global instmem heap so + * cinst is invalid, we use it on all channels though so + * cinst needs to be valid, set it the same as pinst + */ + gpuobj->cinst = gpuobj->pinst; + /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE * on those cards? 
*/ - nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | - (1 << 12) /* PT present */ | - (0 << 13) /* PT *not* linear */ | - (NV_DMA_ACCESS_RW << 14) | - (NV_DMA_TARGET_PCI << 16)); - nv_wo32(dev, gpuobj, 1, aper_size - 1); + nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | + (1 << 12) /* PT present */ | + (0 << 13) /* PT *not* linear */ | + (NV_DMA_ACCESS_RW << 14) | + (NV_DMA_TARGET_PCI << 16)); + nv_wo32(gpuobj, 4, aper_size - 1); for (i = 2; i < 2 + (aper_size >> 12); i++) { - nv_wo32(dev, gpuobj, i, - dev_priv->gart_info.sg_dummy_bus | 3); + nv_wo32(gpuobj, i * 4, + dev_priv->gart_info.sg_dummy_bus | 3); } } else { for (i = 0; i < obj_size; i += 8) { - nv_wo32(dev, gpuobj, (i+0)/4, - dev_priv->gart_info.sg_dummy_bus | 0x21); - nv_wo32(dev, gpuobj, (i+4)/4, 0); + nv_wo32(gpuobj, i + 0, 0x00000000); + nv_wo32(gpuobj, i + 4, 0x00000000); } } dev_priv->engine.instmem.flush(dev); @@ -298,7 +306,7 @@ nouveau_sgdma_takedown(struct drm_device *dev) dev_priv->gart_info.sg_dummy_bus = 0; } - nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); + nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); } int @@ -308,9 +316,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; int pte; - pte = (offset >> NV_CTXDMA_PAGE_SHIFT); + pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2; if (dev_priv->card_type < NV_50) { - *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; + *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 989322be3728..ed7757f14083 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -35,6 +35,8 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nouveau_fbcon.h" +#include "nouveau_ramht.h" +#include "nouveau_pm.h" #include "nv50_display.h" static void nouveau_stub_takedown(struct drm_device *dev) {} @@ -78,7 +80,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv04_fifo_channel_id; engine->fifo.create_context = nv04_fifo_create_context; @@ -95,6 +96,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = NULL; engine->gpio.set = NULL; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; break; case 0x10: engine->instmem.init = nv04_instmem_init; @@ -130,7 +134,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; @@ -147,6 +150,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv10_gpio_get; engine->gpio.set = nv10_gpio_set; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; break; case 0x20: engine->instmem.init = 
nv04_instmem_init; @@ -182,7 +188,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; @@ -199,6 +204,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv10_gpio_get; engine->gpio.set = nv10_gpio_set; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; break; case 0x30: engine->instmem.init = nv04_instmem_init; @@ -234,7 +242,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; @@ -251,6 +258,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv10_gpio_get; engine->gpio.set = nv10_gpio_set; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; break; case 0x40: case 0x60: @@ -287,7 +299,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; - engine->fifo.cache_flush = nv04_fifo_cache_flush; engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv40_fifo_create_context; @@ -304,6 +315,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv10_gpio_get; engine->gpio.set = nv10_gpio_set; engine->gpio.irq_enable = NULL; + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; + engine->pm.temp_get = nv40_temp_get; break; case 0x50: case 0x80: /* gotta love NVIDIA's consistency.. 
*/ @@ -358,6 +375,27 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; engine->gpio.irq_enable = nv50_gpio_irq_enable; + switch (dev_priv->chipset) { + case 0xa3: + case 0xa5: + case 0xa8: + case 0xaf: + engine->pm.clock_get = nva3_pm_clock_get; + engine->pm.clock_pre = nva3_pm_clock_pre; + engine->pm.clock_set = nva3_pm_clock_set; + break; + default: + engine->pm.clock_get = nv50_pm_clock_get; + engine->pm.clock_pre = nv50_pm_clock_pre; + engine->pm.clock_set = nv50_pm_clock_set; + break; + } + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; + if (dev_priv->chipset >= 0x84) + engine->pm.temp_get = nv84_temp_get; + else + engine->pm.temp_get = nv40_temp_get; break; case 0xC0: engine->instmem.init = nvc0_instmem_init; @@ -437,16 +475,14 @@ static int nouveau_card_init_channel(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj; + struct nouveau_gpuobj *gpuobj = NULL; int ret; ret = nouveau_channel_alloc(dev, &dev_priv->channel, - (struct drm_file *)-2, - NvDmaFB, NvDmaTT); + (struct drm_file *)-2, NvDmaFB, NvDmaTT); if (ret) return ret; - gpuobj = NULL; ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->vram_size, NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, @@ -454,26 +490,25 @@ nouveau_card_init_channel(struct drm_device *dev) if (ret) goto out_err; - ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM, - gpuobj, NULL); + ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); if (ret) goto out_err; - gpuobj = NULL; ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, dev_priv->gart_info.aper_size, NV_DMA_ACCESS_RW, &gpuobj, NULL); if (ret) goto out_err; - ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART, - gpuobj, NULL); + ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj); + nouveau_gpuobj_ref(NULL, &gpuobj); if (ret) goto out_err; return 0; + out_err: - nouveau_gpuobj_del(dev, &gpuobj); nouveau_channel_free(dev_priv->channel); dev_priv->channel = NULL; return ret; @@ -534,35 +569,28 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_display_early; - ret = nouveau_mem_detect(dev); + nouveau_pm_init(dev); + + ret = nouveau_mem_vram_init(dev); if (ret) goto out_bios; - ret = nouveau_gpuobj_early_init(dev); - if (ret) - goto out_bios; - - /* Initialise instance memory, must happen before mem_init so we - * know exactly how much VRAM we're able to use for "normal" - * purposes. - */ - ret = engine->instmem.init(dev); - if (ret) - goto out_gpuobj_early; - - /* Setup the memory manager */ - ret = nouveau_mem_init(dev); - if (ret) - goto out_instmem; - ret = nouveau_gpuobj_init(dev); if (ret) - goto out_mem; + goto out_vram; + + ret = engine->instmem.init(dev); + if (ret) + goto out_gpuobj; + + ret = nouveau_mem_gart_init(dev); + if (ret) + goto out_instmem; /* PMC */ ret = engine->mc.init(dev); if (ret) - goto out_gpuobj; + goto out_gart; /* PGPIO */ ret = engine->gpio.init(dev); @@ -611,9 +639,13 @@ nouveau_card_init(struct drm_device *dev) /* what about PVIDEO/PCRTC/PRAMDAC etc? 
*/ if (!engine->graph.accel_blocked) { - ret = nouveau_card_init_channel(dev); + ret = nouveau_fence_init(dev); if (ret) goto out_irq; + + ret = nouveau_card_init_channel(dev); + if (ret) + goto out_fence; } ret = nouveau_backlight_init(dev); @@ -624,6 +656,8 @@ nouveau_card_init(struct drm_device *dev) drm_kms_helper_poll_init(dev); return 0; +out_fence: + nouveau_fence_fini(dev); out_irq: drm_irq_uninstall(dev); out_display: @@ -642,16 +676,16 @@ out_gpio: engine->gpio.takedown(dev); out_mc: engine->mc.takedown(dev); -out_gpuobj: - nouveau_gpuobj_takedown(dev); -out_mem: - nouveau_sgdma_takedown(dev); - nouveau_mem_close(dev); +out_gart: + nouveau_mem_gart_fini(dev); out_instmem: engine->instmem.takedown(dev); -out_gpuobj_early: - nouveau_gpuobj_late_takedown(dev); +out_gpuobj: + nouveau_gpuobj_takedown(dev); +out_vram: + nouveau_mem_vram_fini(dev); out_bios: + nouveau_pm_fini(dev); nouveau_bios_takedown(dev); out_display_early: engine->display.late_takedown(dev); @@ -667,7 +701,8 @@ static void nouveau_card_takedown(struct drm_device *dev) nouveau_backlight_exit(dev); - if (dev_priv->channel) { + if (!engine->graph.accel_blocked) { + nouveau_fence_fini(dev); nouveau_channel_free(dev_priv->channel); dev_priv->channel = NULL; } @@ -686,15 +721,15 @@ static void nouveau_card_takedown(struct drm_device *dev) ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); mutex_unlock(&dev->struct_mutex); - nouveau_sgdma_takedown(dev); + nouveau_mem_gart_fini(dev); - nouveau_gpuobj_takedown(dev); - nouveau_mem_close(dev); engine->instmem.takedown(dev); + nouveau_gpuobj_takedown(dev); + nouveau_mem_vram_fini(dev); drm_irq_uninstall(dev); - nouveau_gpuobj_late_takedown(dev); + nouveau_pm_fini(dev); nouveau_bios_takedown(dev); vga_client_register(dev->pdev, NULL, NULL, NULL); @@ -1057,7 +1092,7 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, /* Waits for PGRAPH to go completely idle */ bool nouveau_wait_for_idle(struct drm_device *dev) { - if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { + if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", nv_rd32(dev, NV04_PGRAPH_STATUS)); return false; diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c new file mode 100644 index 000000000000..16bbbf1eff63 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -0,0 +1,309 @@ +/* + * Copyright 2010 PathScale inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Martin Peres + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +static void +nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; + struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp; + int i, headerlen, recordlen, entries; + + if (!temp) { + NV_DEBUG(dev, "temperature table pointer invalid\n"); + return; + } + + /* Set the default sensor's constants */ + sensor->offset_constant = 0; + sensor->offset_mult = 1; + sensor->offset_div = 1; + sensor->slope_mult = 1; + sensor->slope_div = 1; + + /* Set the default temperature thresholds */ + temps->critical = 110; + temps->down_clock = 100; + temps->fan_boost = 90; + + /* Set the known default values to set up the temperature sensor */ + if (dev_priv->card_type >= NV_40) { + switch (dev_priv->chipset) { + case 0x43: + sensor->offset_mult = 32060; + sensor->offset_div = 1000; + sensor->slope_mult = 792; + sensor->slope_div = 1000; + break; + + case 0x44: + case 0x47: + case 0x4a: + sensor->offset_mult = 27839; + sensor->offset_div = 1000; + sensor->slope_mult = 780; + sensor->slope_div = 1000; + break; + + case 0x46: + sensor->offset_mult = -24775; + sensor->offset_div = 100; + sensor->slope_mult = 467; + sensor->slope_div = 10000; + break; + + case 0x49: + sensor->offset_mult = -25051; + sensor->offset_div = 100; + sensor->slope_mult = 458; + sensor->slope_div = 10000; + break; + + case 0x4b: + sensor->offset_mult = -24088; + sensor->offset_div = 100; + sensor->slope_mult = 442; + sensor->slope_div = 10000; + break; + + case 0x50: + sensor->offset_mult = -22749; + sensor->offset_div = 100; + sensor->slope_mult = 431; + sensor->slope_div = 10000; + break; + } + } + + headerlen = temp[1]; + recordlen = temp[2]; + entries = temp[3]; + temp = temp + headerlen; + + /* Read the entries from the table */ + for (i = 0; i < entries; i++) { + u16 value = ROM16(temp[1]); + + switch (temp[0]) { + case 0x01: + if ((value & 0x8f) == 0) + sensor->offset_constant = (value >> 9) & 0x7f; + break; + + case 0x04: + if ((value & 0xf00f) == 0xa000) /* core */ + temps->critical = (value&0x0ff0) >> 4; + break; + + case 0x07: + if ((value & 0xf00f) == 0xa000) /* core */ + temps->down_clock = (value&0x0ff0) >> 4; + break; + + case 0x08: + if ((value & 0xf00f) == 0xa000) /* core */ + temps->fan_boost = (value&0x0ff0) >> 4; + break; + + case 0x10: + sensor->offset_mult = value; + break; + + case 0x11: + sensor->offset_div = value; + break; + + case 0x12: + sensor->slope_mult = value; + break; + + case 0x13: + sensor->slope_div = value; + break; + } + temp += recordlen; + } + + nouveau_temp_safety_checks(dev); +} + +static int +nv40_sensor_setup(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; + u32 offset = sensor->offset_mult / sensor->offset_div; + u32 sensor_calibration; + + /* set up the sensors */ + sensor_calibration = 120 - offset - sensor->offset_constant; + sensor_calibration
+ +static int +nv40_sensor_setup(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; + u32 offset = sensor->offset_mult / sensor->offset_div; + u32 sensor_calibration; + + /* set up the sensors */ + sensor_calibration = 120 - offset - sensor->offset_constant; + sensor_calibration = sensor_calibration * sensor->slope_div / + sensor->slope_mult; + + if (dev_priv->chipset >= 0x46) + sensor_calibration |= 0x80000000; + else + sensor_calibration |= 0x10000000; + + nv_wr32(dev, 0x0015b0, sensor_calibration); + + /* Wait for the sensor to update */ + msleep(5); + + /* read */ + return nv_rd32(dev, 0x0015b4) & 0x1fff; +} + +int +nv40_temp_get(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants; + int offset = sensor->offset_mult / sensor->offset_div; + int core_temp; + + if (dev_priv->chipset >= 0x50) { + core_temp = nv_rd32(dev, 0x20008); + } else { + core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; + /* Set up the sensor if the temperature is 0 */ + if (core_temp == 0) + core_temp = nv40_sensor_setup(dev); + } + + core_temp = core_temp * sensor->slope_mult / sensor->slope_div; + core_temp = core_temp + offset + sensor->offset_constant; + + return core_temp; +} + +int +nv84_temp_get(struct drm_device *dev) +{ + return nv_rd32(dev, 0x20400); +} + +void +nouveau_temp_safety_checks(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp; + + if (temps->critical > 120) + temps->critical = 120; + else if (temps->critical < 80) + temps->critical = 80; + + if (temps->down_clock > 110) + temps->down_clock = 110; + else if (temps->down_clock < 60) + temps->down_clock = 60; + + if (temps->fan_boost > 100) + temps->fan_boost = 100; + else if (temps->fan_boost < 40) + temps->fan_boost = 40; +} + +static bool +probe_monitoring_device(struct nouveau_i2c_chan *i2c, + struct i2c_board_info *info) +{ + char modalias[16] = "i2c:"; + struct i2c_client *client; + + strlcat(modalias, info->type, sizeof(modalias)); + request_module(modalias); + + client = i2c_new_device(&i2c->adapter, info); + if (!client) + return false; + + if (!client->driver || client->driver->detect(client, info)) { + i2c_unregister_device(client); + return false; + } + + return true; +} + +static void +nouveau_temp_probe_i2c(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct dcb_table *dcb = &dev_priv->vbios.dcb; + struct i2c_board_info info[] = { + { I2C_BOARD_INFO("w83l785ts", 0x2d) }, + { I2C_BOARD_INFO("w83781d", 0x2d) }, + { I2C_BOARD_INFO("f75375", 0x2e) }, + { I2C_BOARD_INFO("adt7473", 0x2e) }, + { I2C_BOARD_INFO("lm99", 0x4c) }, + { } + }; + int idx = (dcb->version >= 0x40 ?
+ dcb->i2c_default_indices & 0xf : 2); + + nouveau_i2c_identify(dev, "monitoring device", info, + probe_monitoring_device, idx); +} + +void +nouveau_temp_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; + struct bit_entry P; + u8 *temp = NULL; + + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version == 1) + temp = ROMPTR(bios, P.data[12]); + else if (P.version == 2) + temp = ROMPTR(bios, P.data[16]); + else + NV_WARN(dev, "unknown temp for BIT P %d\n", P.version); + + nouveau_temp_vbios_parse(dev, temp); + } + + nouveau_temp_probe_i2c(dev); +} + +void +nouveau_temp_fini(struct drm_device *dev) +{ + +} diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c new file mode 100644 index 000000000000..04fdc00a67d5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_volt.c @@ -0,0 +1,212 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_pm.h" + +static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a }; +static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); + +int +nouveau_voltage_gpio_get(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + u8 vid = 0; + int i; + + for (i = 0; i < nr_vidtag; i++) { + if (!(volt->vid_mask & (1 << i))) + continue; + + vid |= gpio->get(dev, vidtag[i]) << i; + } + + return nouveau_volt_lvl_lookup(dev, vid); +} + +int +nouveau_voltage_gpio_set(struct drm_device *dev, int voltage) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + int vid, i; + + vid = nouveau_volt_vid_lookup(dev, voltage); + if (vid < 0) + return vid; + + for (i = 0; i < nr_vidtag; i++) { + if (!(volt->vid_mask & (1 << i))) + continue; + + gpio->set(dev, vidtag[i], !!(vid & (1 << i))); + } + + return 0; +} + +int +nouveau_volt_vid_lookup(struct drm_device *dev, int voltage) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + int i; + + for (i = 0; i < volt->nr_level; i++) { + if (volt->level[i].voltage == voltage) + return volt->level[i].vid; + } + + return -ENOENT; +} + +int +nouveau_volt_lvl_lookup(struct drm_device *dev, int vid) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + int i; + + for (i = 0; i < volt->nr_level; i++) { + if (volt->level[i].vid == vid) + return volt->level[i].voltage; + } + + return -ENOENT; +}
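The helpers above treat the operating voltage as a small VID code spread across up to four GPIO lines, with vid_mask recording which of those lines the board actually wires up: nouveau_voltage_gpio_get() assembles the code one bit per wired line and translates it through the VBIOS-derived level[] table, while nouveau_voltage_gpio_set() performs the reverse lookup and drives each wired line. A minimal usage sketch under those assumptions; the target value 110 is illustrative and is in whatever unit the VBIOS entry byte encodes:

	int cur = nouveau_voltage_gpio_get(dev);	/* current voltage, or -ENOENT */

	if (cur >= 0 && cur != 110 &&
	    nouveau_volt_vid_lookup(dev, 110) >= 0)	/* is the target in the table? */
		nouveau_voltage_gpio_set(dev, 110);	/* drive the VID GPIO lines */

nouveau_volt_init() below is what fills the level[] table these lookups search.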
+ +void +nouveau_volt_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_voltage *voltage = &pm->voltage; + struct nvbios *bios = &dev_priv->vbios; + struct bit_entry P; + u8 *volt = NULL, *entry; + int i, headerlen, recordlen, entries, vidmask, vidshift; + + if (bios->type == NVBIOS_BIT) { + if (bit_table(dev, 'P', &P)) + return; + + if (P.version == 1) + volt = ROMPTR(bios, P.data[16]); + else + if (P.version == 2) + volt = ROMPTR(bios, P.data[12]); + else { + NV_WARN(dev, "unknown volt for BIT P %d\n", P.version); + } + } else { + if (bios->data[bios->offset + 6] < 0x27) { + NV_DEBUG(dev, "BMP version too old for voltage\n"); + return; + } + + volt = ROMPTR(bios, bios->data[bios->offset + 0x98]); + } + + if (!volt) { + NV_DEBUG(dev, "voltage table pointer invalid\n"); + return; + } + + switch (volt[0]) { + case 0x10: + case 0x11: + case 0x12: + headerlen = 5; + recordlen = volt[1]; + entries = volt[2]; + vidshift = 0; + vidmask = volt[4]; + break; + case 0x20: + headerlen = volt[1]; + recordlen = volt[3]; + entries = volt[2]; + vidshift = 0; /* could be vidshift like 0x30? */ + vidmask = volt[5]; + break; + case 0x30: + headerlen = volt[1]; + recordlen = volt[2]; + entries = volt[3]; + vidshift = hweight8(volt[5]); + vidmask = volt[4]; + break; + default: + NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); + return; + } + + /* validate vid mask */ + voltage->vid_mask = vidmask; + if (!voltage->vid_mask) + return; + + i = 0; + while (vidmask) { + if (i >= nr_vidtag) { + NV_DEBUG(dev, "vid bit %d unknown\n", i); + return; + } + + if (!nouveau_bios_gpio_entry(dev, vidtag[i])) { + NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i); + return; + } + + vidmask >>= 1; + i++; + } + + /* parse vbios entries into common format */ + voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL); + if (!voltage->level) + return; + + entry = volt + headerlen; + for (i = 0; i < entries; i++, entry += recordlen) { + voltage->level[i].voltage = entry[0]; + voltage->level[i].vid = entry[1] >> vidshift; + } + voltage->nr_level = entries; + voltage->supported = true; +} + +void +nouveau_volt_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; + + kfree(volt->level); +} diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 497df8765f28..c71abc2a34d5 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -33,6 +33,7 @@ #include "nouveau_fb.h" #include "nouveau_hw.h" #include "nvreg.h" +#include "nouveau_fbcon.h" static int nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, @@ -109,7 +110,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod struct nouveau_pll_vals *pv = &regp->pllvals; struct pll_lims pll_lim; - if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim)) + if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim)) return; /* NM2 == 0 is used to determine single stage mode on two stage plls */ @@ -718,6 +719,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); kfree(nv_crtc); } @@ -768,8 +770,9 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, } static int -nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *passed_fb, + int x, int y, bool atomic) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -780,13 +783,26 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, int arb_burst, arb_lwm; int ret; - ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; + /* If atomic, we want to switch to the fb we were passed, so + * now we update pointers to do that. (We don't pin; just + * assume we're already pinned and update the base address.) + */ + if (atomic) { + drm_fb = passed_fb; + fb = nouveau_framebuffer(passed_fb); + } + else { + /* If not atomic, we can go ahead and pin, and unpin the + * old fb we were passed.
+ */ + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; - if (old_fb) { - struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb); - nouveau_bo_unpin(ofb->nvbo); + if (passed_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); + nouveau_bo_unpin(ofb->nvbo); + } } nv_crtc->fb.offset = fb->nvbo->bo.offset; @@ -826,7 +842,7 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); - if (dev_priv->card_type >= NV_30) { + if (dev_priv->card_type >= NV_20) { regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); } @@ -834,6 +850,29 @@ nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, return 0; } +static int +nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); +} + +static int +nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; + struct drm_device *dev = dev_priv->dev; + + if (state == ENTER_ATOMIC_MODE_SET) + nouveau_fbcon_save_disable_accel(dev); + else + nouveau_fbcon_restore_accel(dev); + + return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); +} + static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, struct nouveau_bo *dst) { @@ -962,6 +1001,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { .mode_fixup = nv_crtc_mode_fixup, .mode_set = nv_crtc_mode_set, .mode_set_base = nv04_crtc_mode_set_base, + .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, .load_lut = nv_crtc_gamma_load, }; diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index ea3627041ecf..ba6423f2ffcc 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -291,6 +291,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) msleep(5); sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); + /* do it again just in case it's a residual current */ + sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL, @@ -343,22 +345,13 @@ static void nv04_dac_prepare(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *helper = encoder->helper_private; struct drm_device *dev = encoder->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; int head = nouveau_crtc(encoder->crtc)->index; - struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg; helper->dpms(encoder, DRM_MODE_DPMS_OFF); nv04_dfp_disable(dev, head); - - /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) - * at LCD__INDEX which we don't alter - */ - if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44)) - crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0; } - static void nv04_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index 0d3206a7046c..c936403b26e2 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -104,6 +104,8 @@ void nv04_dfp_disable(struct drm_device *dev, int head) } /* don't inadvertently turn it on when state 
written later */ crtcstate[head].fp_control = FP_TG_CONTROL_OFF; + crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &= + ~NV_CIO_CRE_LCD_ROUTE_MASK; } void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode) @@ -253,26 +255,21 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder) nv04_dfp_prepare_sel_clk(dev, nv_encoder, head); - /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) - * at LCD__INDEX which we don't alter - */ - if (!(*cr_lcd & 0x44)) { - *cr_lcd = 0x3; + *cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3; - if (nv_two_heads(dev)) { - if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP) - *cr_lcd |= head ? 0x0 : 0x8; - else { - *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; - if (nv_encoder->dcb->type == OUTPUT_LVDS) - *cr_lcd |= 0x30; - if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { - /* avoid being connected to both crtcs */ - *cr_lcd_oth &= ~0x30; - NVWriteVgaCrtc(dev, head ^ 1, - NV_CIO_CRE_LCD__INDEX, - *cr_lcd_oth); - } + if (nv_two_heads(dev)) { + if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP) + *cr_lcd |= head ? 0x0 : 0x8; + else { + *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; + if (nv_encoder->dcb->type == OUTPUT_LVDS) + *cr_lcd |= 0x30; + if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { + /* avoid being connected to both crtcs */ + *cr_lcd_oth &= ~0x30; + NVWriteVgaCrtc(dev, head ^ 1, + NV_CIO_CRE_LCD__INDEX, + *cr_lcd_oth); } } } @@ -640,7 +637,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) get_tmds_slave(encoder)) return; - type = nouveau_i2c_identify(dev, "TMDS transmitter", info, 2); + type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2); if (type < 0) return; diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 1eeac4fae73d..33e4c9388bc1 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -25,6 +25,7 @@ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_ramht.h" #include "nouveau_fbcon.h" void @@ -169,11 +170,9 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) if (ret) return ret; - ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL); - if (ret) - return ret; - - return 0; + ret = nouveau_ramht_insert(dev_priv->channel, handle, obj); + nouveau_gpuobj_ref(NULL, &obj); + return ret; } int diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 06cedd99c26a..708293b7ddcd 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -27,8 +27,9 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" -#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE)) +#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) #define NV04_RAMFC__SIZE 32 #define NV04_RAMFC_DMA_PUT 0x00 #define NV04_RAMFC_DMA_GET 0x04 @@ -38,10 +39,8 @@ #define NV04_RAMFC_ENGINE 0x14 #define NV04_RAMFC_PULL1_ENGINE 0x18 -#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \ - NV04_RAMFC_##offset/4, (val)) -#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \ - NV04_RAMFC_##offset/4) +#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val)) +#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset) void nv04_fifo_disable(struct drm_device *dev) @@ -71,38 +70,33 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable) return (reassign == 1); } -bool 
-nv04_fifo_cache_flush(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; - uint64_t start = ptimer->read(dev); - - do { - if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) == - nv_rd32(dev, NV03_PFIFO_CACHE1_PUT)) - return true; - - } while (ptimer->read(dev) - start < 100000000); - - NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n"); - - return false; -} - bool nv04_fifo_cache_pull(struct drm_device *dev, bool enable) { - uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0); + int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable); + + if (!enable) { + /* In some cases the PFIFO puller may be left in an + * inconsistent state if you try to stop it when it's + * busy translating handles. Sometimes you get a + * PFIFO_CACHE_ERROR, sometimes it just fails silently + * sending incorrect instance offsets to PGRAPH after + * it's started up again. To avoid the latter we + * invalidate the most recently calculated instance. + */ + if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0, + NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0)) + NV_ERROR(dev, "Timeout idling the PFIFO puller.\n"); + + if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) & + NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_CACHE_ERROR); - if (enable) { - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1); - } else { - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1); nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); } - return !!(pull & 1); + return pull & 1; } int @@ -130,7 +124,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) NV04_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, - NULL, &chan->ramfc); + &chan->ramfc); if (ret) return ret; @@ -139,7 +133,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) /* Setup initial state */ RAMFC_WR(DMA_PUT, chan->pushbuf_base); RAMFC_WR(DMA_GET, chan->pushbuf_base); - RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); + RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4); RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | @@ -161,7 +155,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan) nv_wr32(dev, NV04_PFIFO_MODE, nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); } static void @@ -264,10 +258,10 @@ nv04_fifo_init_ramxx(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((dev_priv->ramht_bits - 9) << 16) | - (dev_priv->ramht_offset >> 8)); - nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); - nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8); + ((dev_priv->ramht->bits - 9) << 16) | + (dev_priv->ramht->gpuobj->pinst >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); + nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); } static void diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 4408232d33f1..0b5ae297abde 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -1,6 +1,7 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" /* returns the size of fifo context */ static int @@ -17,102 +18,51 @@ nouveau_fifo_ctx_size(struct drm_device *dev) return 32; } -static void 
-nv04_instmem_determine_amount(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - int i; - - /* Figure out how much instance memory we need */ - if (dev_priv->card_type >= NV_40) { - /* We'll want more instance memory than this on some NV4x cards. - * There's a 16MB aperture to play with that maps onto the end - * of vram. For now, only reserve a small piece until we know - * more about what each chipset requires. - */ - switch (dev_priv->chipset) { - case 0x40: - case 0x47: - case 0x49: - case 0x4b: - dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024); - break; - default: - dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024); - break; - } - } else { - /*XXX: what *are* the limits on <nv40 cards? - */ - dev_priv->ramin_rsvd_vram = (512 * 1024); - } - NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10); - - /* Clear all of it, except the BIOS image that's in the first 64KiB */ - for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4) - nv_wi32(dev, i, 0x00000000); -} - -static void -nv04_instmem_configure_fixed_tables(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - - /* FIFO hash table (RAMHT) - * use 4k hash table at RAMIN+0x10000 - * TODO: extend the hash table - */ - dev_priv->ramht_offset = 0x10000; - dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */ - dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */ - NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset, - dev_priv->ramht_size); - - /* FIFO runout table (RAMRO) - 512k at 0x11200 */ - dev_priv->ramro_offset = 0x11200; - dev_priv->ramro_size = 512; - NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset, - dev_priv->ramro_size); - - /* FIFO context table (RAMFC) - * NV40 : Not sure exactly how to position RAMFC on some cards, - * 0x30002 seems to position it at RAMIN+0x20000 on these - * cards. RAMFC is 4kb (32 fifos, 128byte entries). - * Others: Position RAMFC at RAMIN+0x11400 - */ - dev_priv->ramfc_size = engine->fifo.channels * - nouveau_fifo_ctx_size(dev); - switch (dev_priv->card_type) { - case NV_40: - dev_priv->ramfc_offset = 0x20000; - break; - case NV_30: - case NV_20: - case NV_10: - case NV_04: - default: - dev_priv->ramfc_offset = 0x11400; - break; - } - NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset, - dev_priv->ramfc_size); -} - int nv04_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t offset; + struct nouveau_gpuobj *ramht = NULL; + u32 offset, length; int ret; - nv04_instmem_determine_amount(dev); - nv04_instmem_configure_fixed_tables(dev); + /* RAMIN always available */ + dev_priv->ramin_available = true; - /* Create a heap to manage RAMIN allocations, we don't allocate - * the space that was reserved for RAMHT/FC/RO.
- */ - offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; + /* Setup shared RAMHT */ + ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096, + NVOBJ_FLAG_ZERO_ALLOC, &ramht); + if (ret) + return ret; + + ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + if (ret) + return ret; + + /* And RAMRO */ + ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512, + NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro); + if (ret) + return ret; + + /* And RAMFC */ + length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev); + switch (dev_priv->card_type) { + case NV_40: + offset = 0x20000; + break; + default: + offset = 0x11400; + break; + } + + ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length, + NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc); + if (ret) + return ret; + + /* Only allow space after RAMFC to be used for object allocation */ + offset += length; /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230 * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0 @@ -140,46 +90,34 @@ int nv04_instmem_init(struct drm_device *dev) void nv04_instmem_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL); + nouveau_gpuobj_ref(NULL, &dev_priv->ramro); + nouveau_gpuobj_ref(NULL, &dev_priv->ramfc); } int -nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz) +nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, + uint32_t *sz) { - if (gpuobj->im_backing) - return -EINVAL; - return 0; } void nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - - if (gpuobj && gpuobj->im_backing) { - if (gpuobj->im_bound) - dev_priv->engine.instmem.unbind(dev, gpuobj); - gpuobj->im_backing = NULL; - } } int nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - if (!gpuobj->im_pramin || gpuobj->im_bound) - return -EINVAL; - - gpuobj->im_bound = 1; return 0; } int nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { - if (gpuobj->im_bound == 0) - return -EINVAL; - - gpuobj->im_bound = 0; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c new file mode 100644 index 000000000000..6a6eb697d38e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv04_pm.c @@ -0,0 +1,81 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_hw.h" +#include "nouveau_pm.h" + +struct nv04_pm_state { + struct pll_lims pll; + struct nouveau_pll_vals calc; +}; + +int +nv04_pm_clock_get(struct drm_device *dev, u32 id) +{ + return nouveau_hw_get_clock(dev, id); +} + +void * +nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, + u32 id, int khz) +{ + struct nv04_pm_state *state; + int ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + ret = get_pll_limits(dev, id, &state->pll); + if (ret) { + kfree(state); + return (ret == -ENOENT) ? NULL : ERR_PTR(ret); + } + + ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc); + if (!ret) { + kfree(state); + return ERR_PTR(-EINVAL); + } + + return state; +} + +void +nv04_pm_clock_set(struct drm_device *dev, void *pre_state) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv04_pm_state *state = pre_state; + u32 reg = state->pll.reg; + + /* thank the insane nouveau_hw_setpll() interface for this */ + if (dev_priv->card_type >= NV_40) + reg += 4; + + nouveau_hw_setpll(dev, reg, &state->calc); + kfree(state); +} + diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c index 0b5d012d7c28..3eb605ddfd03 100644 --- a/drivers/gpu/drm/nouveau/nv04_tv.c +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -49,8 +49,8 @@ static struct i2c_board_info nv04_tv_encoder_info[] = { int nv04_tv_identify(struct drm_device *dev, int i2c_index) { - return nouveau_i2c_identify(dev, "TV encoder", - nv04_tv_encoder_info, i2c_index); + return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info, + NULL, i2c_index); } @@ -99,12 +99,10 @@ static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) state->tv_setup = 0; - if (bind) { - state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0; + if (bind) state->CRTC[NV_CIO_CRE_49] |= 0x10; - } else { + else state->CRTC[NV_CIO_CRE_49] &= ~0x10; - } NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, state->CRTC[NV_CIO_CRE_LCD__INDEX]); diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index 7a4069cf5d0b..f1b03ad58fd5 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -27,8 +27,9 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" -#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE)) +#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE)) #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 
64 : 32) int @@ -48,7 +49,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan) ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); + NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); if (ret) return ret; @@ -57,7 +58,7 @@ nv10_fifo_create_context(struct nouveau_channel *chan) */ nv_wi32(dev, fc + 0, chan->pushbuf_base); nv_wi32(dev, fc + 4, chan->pushbuf_base); - nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); + nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | @@ -80,7 +81,7 @@ nv10_fifo_destroy_context(struct nouveau_channel *chan) nv_wr32(dev, NV04_PFIFO_MODE, nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); } static void @@ -202,14 +203,14 @@ nv10_fifo_init_ramxx(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((dev_priv->ramht_bits - 9) << 16) | - (dev_priv->ramht_offset >> 8)); - nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + ((dev_priv->ramht->bits - 9) << 16) | + (dev_priv->ramht->gpuobj->pinst >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); if (dev_priv->chipset < 0x17) { - nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8); + nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8); } else { - nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) | + nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) | (1 << 16) /* 64 Bytes entry*/); /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */ } diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index b2f6a57c0cc5..8e68c9731159 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -803,7 +803,7 @@ nv10_graph_context_switch(struct drm_device *dev) /* Load context for next channel */ chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; chan = dev_priv->fifos[chid]; - if (chan) + if (chan && chan->pgraph_ctx) nv10_graph_load_context(chan); pgraph->fifo_access(dev, true); diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 13cdc05b7c2d..28119fd19d03 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -193,55 +193,56 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) } } -static const struct { - int hdisplay; - int vdisplay; -} modes[] = { - { 640, 400 }, - { 640, 480 }, - { 720, 480 }, - { 720, 576 }, - { 800, 600 }, - { 1024, 768 }, - { 1280, 720 }, - { 1280, 1024 }, - { 1920, 1080 } -}; - -static int nv17_tv_get_modes(struct drm_encoder *encoder, - struct drm_connector *connector) +static int nv17_tv_get_ld_modes(struct drm_encoder *encoder, + struct drm_connector *connector) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); - struct drm_display_mode *mode; - struct drm_display_mode *output_mode; + struct drm_display_mode *mode, *tv_mode; int n = 0; - int i; - if (tv_norm->kind != CTV_ENC_MODE) { - struct drm_display_mode *tv_mode; + for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { + mode = drm_mode_duplicate(encoder->dev, tv_mode); - for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { - mode = drm_mode_duplicate(encoder->dev, 
tv_mode); + mode->clock = tv_norm->tv_enc_mode.vrefresh * + mode->htotal / 1000 * + mode->vtotal / 1000; - mode->clock = tv_norm->tv_enc_mode.vrefresh * - mode->htotal / 1000 * - mode->vtotal / 1000; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + mode->clock *= 2; - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - mode->clock *= 2; + if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && + mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) + mode->type |= DRM_MODE_TYPE_PREFERRED; - if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && - mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) - mode->type |= DRM_MODE_TYPE_PREFERRED; - - drm_mode_probed_add(connector, mode); - n++; - } - return n; + drm_mode_probed_add(connector, mode); + n++; } - /* tv_norm->kind == CTV_ENC_MODE */ - output_mode = &tv_norm->ctv_enc_mode.mode; + return n; +} + +static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; + struct drm_display_mode *mode; + const struct { + int hdisplay; + int vdisplay; + } modes[] = { + { 640, 400 }, + { 640, 480 }, + { 720, 480 }, + { 720, 576 }, + { 800, 600 }, + { 1024, 768 }, + { 1280, 720 }, + { 1280, 1024 }, + { 1920, 1080 } + }; + int i, n = 0; + for (i = 0; i < ARRAY_SIZE(modes); i++) { if (modes[i].hdisplay > output_mode->hdisplay || modes[i].vdisplay > output_mode->vdisplay) @@ -251,11 +252,12 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder, modes[i].vdisplay == output_mode->vdisplay) { mode = drm_mode_duplicate(encoder->dev, output_mode); mode->type |= DRM_MODE_TYPE_PREFERRED; + } else { mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay, - modes[i].vdisplay, 60, false, - output_mode->flags & DRM_MODE_FLAG_INTERLACE, - false); + modes[i].vdisplay, 60, false, + (output_mode->flags & + DRM_MODE_FLAG_INTERLACE), false); } /* CVT modes are sometimes unsuitable... */ @@ -266,6 +268,7 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder, - mode->hdisplay) * 9 / 10) & ~7; mode->hsync_end = mode->hsync_start + 8; } + if (output_mode->vdisplay >= 1024) { mode->vtotal = output_mode->vtotal; mode->vsync_start = output_mode->vsync_start; @@ -276,9 +279,21 @@ static int nv17_tv_get_modes(struct drm_encoder *encoder, drm_mode_probed_add(connector, mode); n++; } + return n; } +static int nv17_tv_get_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + + if (tv_norm->kind == CTV_ENC_MODE) + return nv17_tv_get_hd_modes(encoder, connector); + else + return nv17_tv_get_ld_modes(encoder, connector); +} + static int nv17_tv_mode_valid(struct drm_encoder *encoder, struct drm_display_mode *mode) { @@ -408,15 +423,8 @@ static void nv17_tv_prepare(struct drm_encoder *encoder) } - /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f) - * at LCD__INDEX which we don't alter - */ - if (!(*cr_lcd & 0x44)) { - if (tv_norm->kind == CTV_ENC_MODE) - *cr_lcd = 0x1 | (head ? 0x0 : 0x8); - else - *cr_lcd = 0; - } + if (tv_norm->kind == CTV_ENC_MODE) + *cr_lcd |= 0x1 | (head ? 
0x0 : 0x8); /* Set the DACCLK register */ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h index c00977cedabd..6bf03840f9eb 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.h +++ b/drivers/gpu/drm/nouveau/nv17_tv.h @@ -127,7 +127,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder); /* TV hardware access functions */ -static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val) +static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, + uint32_t val) { nv_wr32(dev, reg, val); } @@ -137,7 +138,8 @@ static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) return nv_rd32(dev, reg); } -static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val) +static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, + uint8_t val) { nv_write_ptv(dev, NV_PTV_TV_INDEX, reg); nv_write_ptv(dev, NV_PTV_TV_DATA, val); @@ -149,8 +151,11 @@ static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg) return nv_read_ptv(dev, NV_PTV_TV_DATA); } -#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg) -#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg) -#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg]) +#define nv_load_ptv(dev, state, reg) \ + nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg) +#define nv_save_ptv(dev, state, reg) \ + state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg) +#define nv_load_tv_enc(dev, state, reg) \ + nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg]) #endif diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c index d64683d97e0d..9d3893c50a41 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c +++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c @@ -336,12 +336,17 @@ static void tv_setup_filter(struct drm_encoder *encoder) struct filter_params *p = &fparams[k][j]; for (i = 0; i < 7; i++) { - int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i) - + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k] - + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker - + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k]; + int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + + p->ki3*i*i*i) + + (p->kr + p->kir*i + p->ki2r*i*i + + p->ki3r*i*i*i) * rs[k] + + (p->kf + p->kif*i + p->ki2f*i*i + + p->ki3f*i*i*i) * flicker + + (p->krf + p->kirf*i + p->ki2rf*i*i + + p->ki3rf*i*i*i) * flicker * rs[k]; - (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9); + (*filters[k])[j][i] = (c + id5/2) >> 39 + & (0x1 << 31 | 0x7f << 9); } } } @@ -349,7 +354,8 @@ static void tv_setup_filter(struct drm_encoder *encoder) /* Hardware state saving/restoring */ -static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) +static void tv_save_filter(struct drm_device *dev, uint32_t base, + uint32_t regs[4][7]) { int i, j; uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; @@ -360,7 +366,8 @@ static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[ } } -static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7]) +static void tv_load_filter(struct drm_device *dev, uint32_t base, + uint32_t regs[4][7]) { int i, j; uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c }; @@ -504,10 +511,10 @@ void 
nv17_tv_update_properties(struct drm_encoder *encoder) break; } - regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255, - tv_enc->saturation); - regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255, - tv_enc->saturation); + regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], + 255, tv_enc->saturation); + regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], + 255, tv_enc->saturation); regs->tv_enc[0x25] = tv_enc->hue * 255 / 100; nv_load_ptv(dev, regs, 204); @@ -541,7 +548,8 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder) int head = nouveau_crtc(encoder->crtc)->index; struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; struct drm_display_mode *crtc_mode = &encoder->crtc->mode; - struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode; + struct drm_display_mode *output_mode = + &get_tv_norm(encoder)->ctv_enc_mode.mode; int overscan, hmargin, vmargin, hratio, vratio; /* The rescaler doesn't do the right thing for interlaced modes. */ @@ -553,13 +561,15 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder) hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2; vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2; - hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin, - overscan); - vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin, - overscan); + hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), + hmargin, overscan); + vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), + vmargin, overscan); - hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin); - vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3; + hratio = crtc_mode->hdisplay * 0x800 / + (output_mode->hdisplay - 2*hmargin); + vratio = crtc_mode->vdisplay * 0x800 / + (output_mode->vdisplay - 2*vmargin) & ~3; regs->fp_horiz_regs[FP_VALID_START] = hmargin; regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1; diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 17f309b36c91..12ab9cd56eca 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -37,49 +37,49 @@ nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x033c/4, 0xffff0000); - nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x047c/4, 0x00000101); - nv_wo32(dev, ctx, 0x0490/4, 0x00000111); - nv_wo32(dev, ctx, 0x04a8/4, 0x44400000); + nv_wo32(ctx, 0x033c, 0xffff0000); + nv_wo32(ctx, 0x03a0, 0x0fff0000); + nv_wo32(ctx, 0x03a4, 0x0fff0000); + nv_wo32(ctx, 0x047c, 0x00000101); + nv_wo32(ctx, 0x0490, 0x00000111); + nv_wo32(ctx, 0x04a8, 0x44400000); for (i = 0x04d4; i <= 0x04e0; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x04f4; i <= 0x0500; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080000); + nv_wo32(ctx, i, 0x00080000); for (i = 0x050c; i <= 0x0518; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x051c; i <= 0x0528; i += 4) - nv_wo32(dev, ctx, i/4, 0x000105b8); + nv_wo32(ctx, i, 0x000105b8); for (i = 0x052c; i <= 0x0538; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(ctx, i, 0x00080008); for (i = 0x055c; i <= 0x0598; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff); - nv_wo32(dev, ctx, 
0x05fc/4, 0x00000001); - nv_wo32(dev, ctx, 0x0604/4, 0x00004000); - nv_wo32(dev, ctx, 0x0610/4, 0x00000001); - nv_wo32(dev, ctx, 0x0618/4, 0x00040000); - nv_wo32(dev, ctx, 0x061c/4, 0x00010000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x05a4, 0x4b7fffff); + nv_wo32(ctx, 0x05fc, 0x00000001); + nv_wo32(ctx, 0x0604, 0x00004000); + nv_wo32(ctx, 0x0610, 0x00000001); + nv_wo32(ctx, 0x0618, 0x00040000); + nv_wo32(ctx, 0x061c, 0x00010000); for (i = 0x1c1c; i <= 0x248c; i += 16) { - nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); - nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); - nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + nv_wo32(ctx, (i + 0), 0x10700ff9); + nv_wo32(ctx, (i + 4), 0x0436086c); + nv_wo32(ctx, (i + 8), 0x000c001b); } - nv_wo32(dev, ctx, 0x281c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2830/4, 0x3f800000); - nv_wo32(dev, ctx, 0x285c/4, 0x40000000); - nv_wo32(dev, ctx, 0x2860/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2864/4, 0x3f000000); - nv_wo32(dev, ctx, 0x286c/4, 0x40000000); - nv_wo32(dev, ctx, 0x2870/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2878/4, 0xbf800000); - nv_wo32(dev, ctx, 0x2880/4, 0xbf800000); - nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000); - nv_wo32(dev, ctx, 0x3530/4, 0x000003f8); - nv_wo32(dev, ctx, 0x3540/4, 0x002fe000); + nv_wo32(ctx, 0x281c, 0x3f800000); + nv_wo32(ctx, 0x2830, 0x3f800000); + nv_wo32(ctx, 0x285c, 0x40000000); + nv_wo32(ctx, 0x2860, 0x3f800000); + nv_wo32(ctx, 0x2864, 0x3f000000); + nv_wo32(ctx, 0x286c, 0x40000000); + nv_wo32(ctx, 0x2870, 0x3f800000); + nv_wo32(ctx, 0x2878, 0xbf800000); + nv_wo32(ctx, 0x2880, 0xbf800000); + nv_wo32(ctx, 0x34a4, 0x000fe000); + nv_wo32(ctx, 0x3530, 0x000003f8); + nv_wo32(ctx, 0x3540, 0x002fe000); for (i = 0x355c; i <= 0x3578; i += 4) - nv_wo32(dev, ctx, i/4, 0x001c527c); + nv_wo32(ctx, i, 0x001c527c); } static void @@ -87,58 +87,58 @@ nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x035c/4, 0xffff0000); - nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x049c/4, 0x00000101); - nv_wo32(dev, ctx, 0x04b0/4, 0x00000111); - nv_wo32(dev, ctx, 0x04c8/4, 0x00000080); - nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000); - nv_wo32(dev, ctx, 0x04d0/4, 0x00000001); - nv_wo32(dev, ctx, 0x04e4/4, 0x44400000); - nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000); + nv_wo32(ctx, 0x035c, 0xffff0000); + nv_wo32(ctx, 0x03c0, 0x0fff0000); + nv_wo32(ctx, 0x03c4, 0x0fff0000); + nv_wo32(ctx, 0x049c, 0x00000101); + nv_wo32(ctx, 0x04b0, 0x00000111); + nv_wo32(ctx, 0x04c8, 0x00000080); + nv_wo32(ctx, 0x04cc, 0xffff0000); + nv_wo32(ctx, 0x04d0, 0x00000001); + nv_wo32(ctx, 0x04e4, 0x44400000); + nv_wo32(ctx, 0x04fc, 0x4b800000); for (i = 0x0510; i <= 0x051c; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x0530; i <= 0x053c; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080000); + nv_wo32(ctx, i, 0x00080000); for (i = 0x0548; i <= 0x0554; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x0558; i <= 0x0564; i += 4) - nv_wo32(dev, ctx, i/4, 0x000105b8); + nv_wo32(ctx, i, 0x000105b8); for (i = 0x0568; i <= 0x0574; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(ctx, i, 0x00080008); for (i = 0x0598; i <= 0x05d4; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x0620/4, 0x00000080); - nv_wo32(dev, ctx, 0x0624/4, 0x30201000); - nv_wo32(dev, ctx, 0x0628/4, 0x70605040); - nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080); - nv_wo32(dev, ctx, 0x0630/4, 
0xf0e0d0c0); - nv_wo32(dev, ctx, 0x0664/4, 0x00000001); - nv_wo32(dev, ctx, 0x066c/4, 0x00004000); - nv_wo32(dev, ctx, 0x0678/4, 0x00000001); - nv_wo32(dev, ctx, 0x0680/4, 0x00040000); - nv_wo32(dev, ctx, 0x0684/4, 0x00010000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x05e0, 0x4b7fffff); + nv_wo32(ctx, 0x0620, 0x00000080); + nv_wo32(ctx, 0x0624, 0x30201000); + nv_wo32(ctx, 0x0628, 0x70605040); + nv_wo32(ctx, 0x062c, 0xb0a09080); + nv_wo32(ctx, 0x0630, 0xf0e0d0c0); + nv_wo32(ctx, 0x0664, 0x00000001); + nv_wo32(ctx, 0x066c, 0x00004000); + nv_wo32(ctx, 0x0678, 0x00000001); + nv_wo32(ctx, 0x0680, 0x00040000); + nv_wo32(ctx, 0x0684, 0x00010000); for (i = 0x1b04; i <= 0x2374; i += 16) { - nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); - nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); - nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + nv_wo32(ctx, (i + 0), 0x10700ff9); + nv_wo32(ctx, (i + 4), 0x0436086c); + nv_wo32(ctx, (i + 8), 0x000c001b); } - nv_wo32(dev, ctx, 0x2704/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2718/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2744/4, 0x40000000); - nv_wo32(dev, ctx, 0x2748/4, 0x3f800000); - nv_wo32(dev, ctx, 0x274c/4, 0x3f000000); - nv_wo32(dev, ctx, 0x2754/4, 0x40000000); - nv_wo32(dev, ctx, 0x2758/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2760/4, 0xbf800000); - nv_wo32(dev, ctx, 0x2768/4, 0xbf800000); - nv_wo32(dev, ctx, 0x308c/4, 0x000fe000); - nv_wo32(dev, ctx, 0x3108/4, 0x000003f8); - nv_wo32(dev, ctx, 0x3468/4, 0x002fe000); + nv_wo32(ctx, 0x2704, 0x3f800000); + nv_wo32(ctx, 0x2718, 0x3f800000); + nv_wo32(ctx, 0x2744, 0x40000000); + nv_wo32(ctx, 0x2748, 0x3f800000); + nv_wo32(ctx, 0x274c, 0x3f000000); + nv_wo32(ctx, 0x2754, 0x40000000); + nv_wo32(ctx, 0x2758, 0x3f800000); + nv_wo32(ctx, 0x2760, 0xbf800000); + nv_wo32(ctx, 0x2768, 0xbf800000); + nv_wo32(ctx, 0x308c, 0x000fe000); + nv_wo32(ctx, 0x3108, 0x000003f8); + nv_wo32(ctx, 0x3468, 0x002fe000); for (i = 0x3484; i <= 0x34a0; i += 4) - nv_wo32(dev, ctx, i/4, 0x001c527c); + nv_wo32(ctx, i, 0x001c527c); } static void @@ -146,49 +146,49 @@ nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x033c/4, 0xffff0000); - nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x047c/4, 0x00000101); - nv_wo32(dev, ctx, 0x0490/4, 0x00000111); - nv_wo32(dev, ctx, 0x04a8/4, 0x44400000); + nv_wo32(ctx, 0x033c, 0xffff0000); + nv_wo32(ctx, 0x03a0, 0x0fff0000); + nv_wo32(ctx, 0x03a4, 0x0fff0000); + nv_wo32(ctx, 0x047c, 0x00000101); + nv_wo32(ctx, 0x0490, 0x00000111); + nv_wo32(ctx, 0x04a8, 0x44400000); for (i = 0x04d4; i <= 0x04e0; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x04f4; i <= 0x0500; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080000); + nv_wo32(ctx, i, 0x00080000); for (i = 0x050c; i <= 0x0518; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 0x051c; i <= 0x0528; i += 4) - nv_wo32(dev, ctx, i/4, 0x000105b8); + nv_wo32(ctx, i, 0x000105b8); for (i = 0x052c; i <= 0x0538; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); + nv_wo32(ctx, i, 0x00080008); for (i = 0x055c; i <= 0x0598; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x05fc/4, 0x00000001); - nv_wo32(dev, ctx, 0x0604/4, 0x00004000); - nv_wo32(dev, ctx, 0x0610/4, 0x00000001); - nv_wo32(dev, ctx, 0x0618/4, 0x00040000); - nv_wo32(dev, ctx, 0x061c/4, 0x00010000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x05a4, 0x4b7fffff); + nv_wo32(ctx, 0x05fc, 
0x00000001); + nv_wo32(ctx, 0x0604, 0x00004000); + nv_wo32(ctx, 0x0610, 0x00000001); + nv_wo32(ctx, 0x0618, 0x00040000); + nv_wo32(ctx, 0x061c, 0x00010000); for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */ - nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9); - nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c); - nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b); + nv_wo32(ctx, (i + 0), 0x10700ff9); + nv_wo32(ctx, (i + 4), 0x0436086c); + nv_wo32(ctx, (i + 8), 0x000c001b); } - nv_wo32(dev, ctx, 0x269c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x26dc/4, 0x40000000); - nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000); - nv_wo32(dev, ctx, 0x26ec/4, 0x40000000); - nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000); - nv_wo32(dev, ctx, 0x2700/4, 0xbf800000); - nv_wo32(dev, ctx, 0x3024/4, 0x000fe000); - nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8); - nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000); + nv_wo32(ctx, 0x269c, 0x3f800000); + nv_wo32(ctx, 0x26b0, 0x3f800000); + nv_wo32(ctx, 0x26dc, 0x40000000); + nv_wo32(ctx, 0x26e0, 0x3f800000); + nv_wo32(ctx, 0x26e4, 0x3f000000); + nv_wo32(ctx, 0x26ec, 0x40000000); + nv_wo32(ctx, 0x26f0, 0x3f800000); + nv_wo32(ctx, 0x26f8, 0xbf800000); + nv_wo32(ctx, 0x2700, 0xbf800000); + nv_wo32(ctx, 0x3024, 0x000fe000); + nv_wo32(ctx, 0x30a0, 0x000003f8); + nv_wo32(ctx, 0x33fc, 0x002fe000); for (i = 0x341c; i <= 0x3438; i += 4) - nv_wo32(dev, ctx, i/4, 0x001c527c); + nv_wo32(ctx, i, 0x001c527c); } static void @@ -196,57 +196,57 @@ nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x0410/4, 0x00000101); - nv_wo32(dev, ctx, 0x0424/4, 0x00000111); - nv_wo32(dev, ctx, 0x0428/4, 0x00000060); - nv_wo32(dev, ctx, 0x0444/4, 0x00000080); - nv_wo32(dev, ctx, 0x0448/4, 0xffff0000); - nv_wo32(dev, ctx, 0x044c/4, 0x00000001); - nv_wo32(dev, ctx, 0x0460/4, 0x44400000); - nv_wo32(dev, ctx, 0x048c/4, 0xffff0000); + nv_wo32(ctx, 0x0410, 0x00000101); + nv_wo32(ctx, 0x0424, 0x00000111); + nv_wo32(ctx, 0x0428, 0x00000060); + nv_wo32(ctx, 0x0444, 0x00000080); + nv_wo32(ctx, 0x0448, 0xffff0000); + nv_wo32(ctx, 0x044c, 0x00000001); + nv_wo32(ctx, 0x0460, 0x44400000); + nv_wo32(ctx, 0x048c, 0xffff0000); for (i = 0x04e0; i < 0x04e8; i += 4) - nv_wo32(dev, ctx, i/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x04ec/4, 0x00011100); + nv_wo32(ctx, i, 0x0fff0000); + nv_wo32(ctx, 0x04ec, 0x00011100); for (i = 0x0508; i < 0x0548; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x058c/4, 0x00000080); - nv_wo32(dev, ctx, 0x0590/4, 0x30201000); - nv_wo32(dev, ctx, 0x0594/4, 0x70605040); - nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888); - nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8); - nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x0550, 0x4b7fffff); + nv_wo32(ctx, 0x058c, 0x00000080); + nv_wo32(ctx, 0x0590, 0x30201000); + nv_wo32(ctx, 0x0594, 0x70605040); + nv_wo32(ctx, 0x0598, 0xb8a89888); + nv_wo32(ctx, 0x059c, 0xf8e8d8c8); + nv_wo32(ctx, 0x05b0, 0xb0000000); for (i = 0x0600; i < 0x0640; i += 4) - nv_wo32(dev, ctx, i/4, 0x00010588); + nv_wo32(ctx, i, 0x00010588); for (i = 0x0640; i < 0x0680; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x06c0; i < 0x0700; i += 4) - nv_wo32(dev, ctx, i/4, 0x0008aae4); + nv_wo32(ctx, i, 0x0008aae4); for (i = 0x0700; i < 0x0740; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 0x01012000); for (i = 
0x0740; i < 0x0780; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); - nv_wo32(dev, ctx, 0x085c/4, 0x00040000); - nv_wo32(dev, ctx, 0x0860/4, 0x00010000); + nv_wo32(ctx, i, 0x00080008); + nv_wo32(ctx, 0x085c, 0x00040000); + nv_wo32(ctx, 0x0860, 0x00010000); for (i = 0x0864; i < 0x0874; i += 4) - nv_wo32(dev, ctx, i/4, 0x00040004); + nv_wo32(ctx, i, 0x00040004); for (i = 0x1f18; i <= 0x3088 ; i += 16) { - nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); - nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); - nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + nv_wo32(ctx, i + 0, 0x10700ff9); + nv_wo32(ctx, i + 4, 0x0436086c); + nv_wo32(ctx, i + 8, 0x000c001b); } for (i = 0x30b8; i < 0x30c8; i += 4) - nv_wo32(dev, ctx, i/4, 0x0000ffff); - nv_wo32(dev, ctx, 0x344c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3808/4, 0x3f800000); - nv_wo32(dev, ctx, 0x381c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3848/4, 0x40000000); - nv_wo32(dev, ctx, 0x384c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3850/4, 0x3f000000); - nv_wo32(dev, ctx, 0x3858/4, 0x40000000); - nv_wo32(dev, ctx, 0x385c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3864/4, 0xbf800000); - nv_wo32(dev, ctx, 0x386c/4, 0xbf800000); + nv_wo32(ctx, i, 0x0000ffff); + nv_wo32(ctx, 0x344c, 0x3f800000); + nv_wo32(ctx, 0x3808, 0x3f800000); + nv_wo32(ctx, 0x381c, 0x3f800000); + nv_wo32(ctx, 0x3848, 0x40000000); + nv_wo32(ctx, 0x384c, 0x3f800000); + nv_wo32(ctx, 0x3850, 0x3f000000); + nv_wo32(ctx, 0x3858, 0x40000000); + nv_wo32(ctx, 0x385c, 0x3f800000); + nv_wo32(ctx, 0x3864, 0xbf800000); + nv_wo32(ctx, 0x386c, 0xbf800000); }
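Throughout these hunks nv_wo32() moves from a (dev, object, word-index, value) signature to (object, byte-offset, value): a word formerly addressed as 0x033c/4 is now written at byte offset 0x033c, and each 16-byte record of the strided init tables is filled at i + 0, i + 4 and i + 8, the byte equivalents of the old word indices i/4 + 0, i/4 + 1 and i/4 + 2. A minimal sketch of the equivalence, assuming an object backed by a plain u32 array rather than the driver's real accessor:

static void
example_wo32(u32 *vinst, u32 byte_offset, u32 value)
{
	/* callers now pass byte offsets; convert to a word index here */
	vinst[byte_offset / 4] = value;
}

so example_wo32(vinst, i + 4, v) touches the same dword the old nv_wo32(dev, ctx, i/4 + 1, v) did.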
for (i = 0x0730; i < 0x0770; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); - nv_wo32(dev, ctx, 0x0850/4, 0x00040000); - nv_wo32(dev, ctx, 0x0854/4, 0x00010000); + nv_wo32(ctx, i, 0x00080008); + nv_wo32(ctx, 0x0850, 0x00040000); + nv_wo32(ctx, 0x0854, 0x00010000); for (i = 0x0858; i < 0x0868; i += 4) - nv_wo32(dev, ctx, i/4, 0x00040004); + nv_wo32(ctx, i, 0x00040004); for (i = 0x15ac; i <= 0x271c ; i += 16) { - nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); - nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); - nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + nv_wo32(ctx, i + 0, 0x10700ff9); + nv_wo32(ctx, i + 4, 0x0436086c); + nv_wo32(ctx, i + 8, 0x000c001b); } for (i = 0x274c; i < 0x275c; i += 4) - nv_wo32(dev, ctx, i/4, 0x0000ffff); - nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2edc/4, 0x40000000); - nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000); - nv_wo32(dev, ctx, 0x2eec/4, 0x40000000); - nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000); - nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000); - nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000); + nv_wo32(ctx, i, 0x0000ffff); + nv_wo32(ctx, 0x2ae0, 0x3f800000); + nv_wo32(ctx, 0x2e9c, 0x3f800000); + nv_wo32(ctx, 0x2eb0, 0x3f800000); + nv_wo32(ctx, 0x2edc, 0x40000000); + nv_wo32(ctx, 0x2ee0, 0x3f800000); + nv_wo32(ctx, 0x2ee4, 0x3f000000); + nv_wo32(ctx, 0x2eec, 0x40000000); + nv_wo32(ctx, 0x2ef0, 0x3f800000); + nv_wo32(ctx, 0x2ef8, 0xbf800000); + nv_wo32(ctx, 0x2f00, 0xbf800000); } static void @@ -312,57 +312,57 @@ nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { int i; - nv_wo32(dev, ctx, 0x040c/4, 0x00000101); - nv_wo32(dev, ctx, 0x0420/4, 0x00000111); - nv_wo32(dev, ctx, 0x0424/4, 0x00000060); - nv_wo32(dev, ctx, 0x0440/4, 0x00000080); - nv_wo32(dev, ctx, 0x0444/4, 0xffff0000); - nv_wo32(dev, ctx, 0x0448/4, 0x00000001); - nv_wo32(dev, ctx, 0x045c/4, 0x44400000); - nv_wo32(dev, ctx, 0x0488/4, 0xffff0000); + nv_wo32(ctx, 0x040c, 0x00000101); + nv_wo32(ctx, 0x0420, 0x00000111); + nv_wo32(ctx, 0x0424, 0x00000060); + nv_wo32(ctx, 0x0440, 0x00000080); + nv_wo32(ctx, 0x0444, 0xffff0000); + nv_wo32(ctx, 0x0448, 0x00000001); + nv_wo32(ctx, 0x045c, 0x44400000); + nv_wo32(ctx, 0x0488, 0xffff0000); for (i = 0x04dc; i < 0x04e4; i += 4) - nv_wo32(dev, ctx, i/4, 0x0fff0000); - nv_wo32(dev, ctx, 0x04e8/4, 0x00011100); + nv_wo32(ctx, i, 0x0fff0000); + nv_wo32(ctx, 0x04e8, 0x00011100); for (i = 0x0504; i < 0x0544; i += 4) - nv_wo32(dev, ctx, i/4, 0x07ff0000); - nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff); - nv_wo32(dev, ctx, 0x0588/4, 0x00000080); - nv_wo32(dev, ctx, 0x058c/4, 0x30201000); - nv_wo32(dev, ctx, 0x0590/4, 0x70605040); - nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888); - nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8); - nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000); + nv_wo32(ctx, i, 0x07ff0000); + nv_wo32(ctx, 0x054c, 0x4b7fffff); + nv_wo32(ctx, 0x0588, 0x00000080); + nv_wo32(ctx, 0x058c, 0x30201000); + nv_wo32(ctx, 0x0590, 0x70605040); + nv_wo32(ctx, 0x0594, 0xb8a89888); + nv_wo32(ctx, 0x0598, 0xf8e8d8c8); + nv_wo32(ctx, 0x05ac, 0xb0000000); for (i = 0x0604; i < 0x0644; i += 4) - nv_wo32(dev, ctx, i/4, 0x00010588); + nv_wo32(ctx, i, 0x00010588); for (i = 0x0644; i < 0x0684; i += 4) - nv_wo32(dev, ctx, i/4, 0x00030303); + nv_wo32(ctx, i, 0x00030303); for (i = 0x06c4; i < 0x0704; i += 4) - nv_wo32(dev, ctx, i/4, 0x0008aae4); + nv_wo32(ctx, i, 0x0008aae4); for (i = 0x0704; i < 0x0744; i += 4) - nv_wo32(dev, ctx, i/4, 0x01012000); + nv_wo32(ctx, i, 
0x01012000); for (i = 0x0744; i < 0x0784; i += 4) - nv_wo32(dev, ctx, i/4, 0x00080008); - nv_wo32(dev, ctx, 0x0860/4, 0x00040000); - nv_wo32(dev, ctx, 0x0864/4, 0x00010000); + nv_wo32(ctx, i, 0x00080008); + nv_wo32(ctx, 0x0860, 0x00040000); + nv_wo32(ctx, 0x0864, 0x00010000); for (i = 0x0868; i < 0x0878; i += 4) - nv_wo32(dev, ctx, i/4, 0x00040004); + nv_wo32(ctx, i, 0x00040004); for (i = 0x1f1c; i <= 0x308c ; i += 16) { - nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9); - nv_wo32(dev, ctx, i/4 + 1, 0x0436086c); - nv_wo32(dev, ctx, i/4 + 2, 0x000c001b); + nv_wo32(ctx, i + 0, 0x10700ff9); + nv_wo32(ctx, i + 4, 0x0436086c); + nv_wo32(ctx, i + 8, 0x000c001b); } for (i = 0x30bc; i < 0x30cc; i += 4) - nv_wo32(dev, ctx, i/4, 0x0000ffff); - nv_wo32(dev, ctx, 0x3450/4, 0x3f800000); - nv_wo32(dev, ctx, 0x380c/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3820/4, 0x3f800000); - nv_wo32(dev, ctx, 0x384c/4, 0x40000000); - nv_wo32(dev, ctx, 0x3850/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3854/4, 0x3f000000); - nv_wo32(dev, ctx, 0x385c/4, 0x40000000); - nv_wo32(dev, ctx, 0x3860/4, 0x3f800000); - nv_wo32(dev, ctx, 0x3868/4, 0xbf800000); - nv_wo32(dev, ctx, 0x3870/4, 0xbf800000); + nv_wo32(ctx, i, 0x0000ffff); + nv_wo32(ctx, 0x3450, 0x3f800000); + nv_wo32(ctx, 0x380c, 0x3f800000); + nv_wo32(ctx, 0x3820, 0x3f800000); + nv_wo32(ctx, 0x384c, 0x40000000); + nv_wo32(ctx, 0x3850, 0x3f800000); + nv_wo32(ctx, 0x3854, 0x3f000000); + nv_wo32(ctx, 0x385c, 0x40000000); + nv_wo32(ctx, 0x3860, 0x3f800000); + nv_wo32(ctx, 0x3868, 0xbf800000); + nv_wo32(ctx, 0x3870, 0xbf800000); } int @@ -372,7 +372,7 @@ nv20_graph_create_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); - unsigned int idoffs = 0x28/4; + unsigned int idoffs = 0x28; int ret; switch (dev_priv->chipset) { @@ -403,21 +403,19 @@ nv20_graph_create_context(struct nouveau_channel *chan) BUG_ON(1); } - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, - 16, NVOBJ_FLAG_ZERO_ALLOC, - &chan->ramin_grctx); + ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx); if (ret) return ret; /* Initialise default context values */ - ctx_init(dev, chan->ramin_grctx->gpuobj); + ctx_init(dev, chan->ramin_grctx); /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ - nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs, - (chan->id << 24) | 0x1); /* CTX_USER */ + nv_wo32(chan->ramin_grctx, idoffs, + (chan->id << 24) | 0x1); /* CTX_USER */ - nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, - chan->ramin_grctx->instance >> 4); + nv_wo32(pgraph->ctx_table, chan->id * 4, chan->ramin_grctx->pinst >> 4); return 0; } @@ -428,10 +426,8 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - if (chan->ramin_grctx) - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - - nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + nv_wo32(pgraph->ctx_table, chan->id * 4, 0); } int @@ -442,7 +438,7 @@ nv20_graph_load_context(struct nouveau_channel *chan) if (!chan->ramin_grctx) return -EINVAL; - inst = chan->ramin_grctx->instance >> 4; + inst = chan->ramin_grctx->pinst >> 4; nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, @@ -465,7 +461,7 @@ 
nv20_graph_unload_context(struct drm_device *dev) chan = pgraph->channel(dev); if (!chan) return 0; - inst = chan->ramin_grctx->instance >> 4; + inst = chan->ramin_grctx->pinst >> 4; nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER, @@ -552,15 +548,15 @@ nv20_graph_init(struct drm_device *dev) if (!pgraph->ctx_table) { /* Create Context Pointer Table */ - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &pgraph->ctx_table); + ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &pgraph->ctx_table); if (ret) return ret; } nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, - pgraph->ctx_table->instance >> 4); + pgraph->ctx_table->pinst >> 4); nv20_graph_rdi(dev); @@ -646,7 +642,7 @@ nv20_graph_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table); + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); } int @@ -681,15 +677,15 @@ nv30_graph_init(struct drm_device *dev) if (!pgraph->ctx_table) { /* Create Context Pointer Table */ - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &pgraph->ctx_table); + ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &pgraph->ctx_table); if (ret) return ret; } nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, - pgraph->ctx_table->instance >> 4); + pgraph->ctx_table->pinst >> 4); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 2b67f1835c39..d337b8b28cdd 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -27,8 +27,9 @@ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_drm.h" +#include "nouveau_ramht.h" -#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE)) +#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE)) #define NV40_RAMFC__SIZE 128 int @@ -42,7 +43,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc); + NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); if (ret) return ret; @@ -50,7 +51,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) nv_wi32(dev, fc + 0, chan->pushbuf_base); nv_wi32(dev, fc + 4, chan->pushbuf_base); - nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); + nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4); nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | @@ -58,7 +59,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) NV_PFIFO_CACHE1_BIG_ENDIAN | #endif 0x30000000 /* no idea.. 
*/); - nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4); + nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4); nv_wi32(dev, fc + 60, 0x0001FFFF); /* enable the fifo dma operation */ @@ -77,8 +78,7 @@ nv40_fifo_destroy_context(struct nouveau_channel *chan) nv_wr32(dev, NV04_PFIFO_MODE, nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - if (chan->ramfc) - nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); } static void @@ -241,9 +241,9 @@ nv40_fifo_init_ramxx(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((dev_priv->ramht_bits - 9) << 16) | - (dev_priv->ramht_offset >> 8)); - nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + ((dev_priv->ramht->bits - 9) << 16) | + (dev_priv->ramht->gpuobj->pinst >> 8)); + nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8); switch (dev_priv->chipset) { case 0x47: @@ -271,7 +271,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) nv_wr32(dev, 0x2230, 0); nv_wr32(dev, NV40_PFIFO_RAMFC, ((dev_priv->vram_size - 512 * 1024 + - dev_priv->ramfc_offset) >> 16) | (3 << 16)); + dev_priv->ramfc->pinst) >> 16) | (3 << 16)); break; } } diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index fd7d2b501316..7ee1b91569b8 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -45,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev) struct nouveau_channel *chan = dev_priv->fifos[i]; if (chan && chan->ramin_grctx && - chan->ramin_grctx->instance == inst) + chan->ramin_grctx->pinst == inst) return chan; } @@ -61,27 +61,25 @@ nv40_graph_create_context(struct nouveau_channel *chan) struct nouveau_grctx ctx = {}; int ret; - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, - 16, NVOBJ_FLAG_ZERO_ALLOC, - &chan->ramin_grctx); + ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx); if (ret) return ret; /* Initialise default context values */ ctx.dev = chan->dev; ctx.mode = NOUVEAU_GRCTX_VALS; - ctx.data = chan->ramin_grctx->gpuobj; + ctx.data = chan->ramin_grctx; nv40_grctx_init(&ctx); - nv_wo32(dev, chan->ramin_grctx->gpuobj, 0, - chan->ramin_grctx->gpuobj->im_pramin->start); + nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst); return 0; } void nv40_graph_destroy_context(struct nouveau_channel *chan) { - nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } static int @@ -135,7 +133,7 @@ nv40_graph_load_context(struct nouveau_channel *chan) if (!chan->ramin_grctx) return -EINVAL; - inst = chan->ramin_grctx->instance >> 4; + inst = chan->ramin_grctx->pinst >> 4; ret = nv40_graph_transfer_context(dev, inst, 0); if (ret) diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c index 9b5c97469588..ce585093264e 100644 --- a/drivers/gpu/drm/nouveau/nv40_grctx.c +++ b/drivers/gpu/drm/nouveau/nv40_grctx.c @@ -596,13 +596,13 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx) offset += 0x0280/4; for (i = 0; i < 16; i++, offset += 2) - nv_wo32(dev, obj, offset, 0x3f800000); + nv_wo32(obj, offset * 4, 0x3f800000); for (vs = 0; vs < vs_nr; vs++, offset += vs_len) { for (i = 0; i < vs_nr_b0 * 6; i += 6) - nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001); + nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001); for (i = 0; i < vs_nr_b1 * 4; i += 4) - nv_wo32(dev, obj, offset + b1_offset + 
i, 0x3f800000); + nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000); } } diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index bfd4ca2fe7ef..16380d52cd88 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) OUT_RING(evo, nv_crtc->lut.depth == 8 ? NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON); - OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start << - PAGE_SHIFT) >> 8); + OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8); if (dev_priv->chipset != 0x50) { BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1); OUT_RING(evo, NvEvoVRAM); @@ -266,15 +265,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct pll_lims pll; - uint32_t reg, reg1, reg2; + uint32_t reg1, reg2; int ret, N1, M1, N2, M2, P; - if (dev_priv->chipset < NV_C0) - reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head); - else - reg = 0x614140 + (head * 0x800); - - ret = get_pll_limits(dev, reg, &pll); + ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll); if (ret) return ret; @@ -286,11 +280,11 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n", pclk, ret, N1, M1, N2, M2, P); - reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00; - reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00; - nv_wr32(dev, reg, 0x10000611); - nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1); - nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); + reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00; + reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00; + nv_wr32(dev, pll.reg + 0, 0x10000611); + nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1); + nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2); } else if (dev_priv->chipset < NV_C0) { ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); @@ -300,10 +294,10 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", pclk, ret, N1, N2, M1, P); - reg1 = nv_rd32(dev, reg + 4) & 0xffc00000; - nv_wr32(dev, reg, 0x50000610); - nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); - nv_wr32(dev, reg + 8, N2); + reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000; + nv_wr32(dev, pll.reg + 0, 0x50000610); + nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1); + nv_wr32(dev, pll.reg + 8, N2); } else { ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P); if (ret <= 0) @@ -312,9 +306,9 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n", pclk, ret, N1, N2, M1, P); - nv_mask(dev, reg + 0x0c, 0x00000000, 0x00000100); - nv_wr32(dev, reg + 0x04, (P << 16) | (N1 << 8) | M1); - nv_wr32(dev, reg + 0x10, N2 << 16); + nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100); + nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1); + nv_wr32(dev, pll.reg + 0x10, N2 << 16); } return 0; @@ -338,7 +332,9 @@ nv50_crtc_destroy(struct drm_crtc *crtc) nv50_cursor_fini(nv_crtc); + nouveau_bo_unmap(nv_crtc->lut.nvbo); nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); kfree(nv_crtc->mode); kfree(nv_crtc); @@ -491,8 +487,9 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, } static int -nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct 
drm_framebuffer *old_fb, bool update) +nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *passed_fb, + int x, int y, bool update, bool atomic) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = nv_crtc->base.dev; @@ -504,6 +501,28 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y, NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + /* If atomic, we want to switch to the fb we were passed, so + * now we update pointers to do that. (We don't pin; just + * assume we're already pinned and update the base address.) + */ + if (atomic) { + drm_fb = passed_fb; + fb = nouveau_framebuffer(passed_fb); + } + else { + /* If not atomic, we can go ahead and pin, and unpin the + * old fb we were passed. + */ + ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + if (passed_fb) { + struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); + nouveau_bo_unpin(ofb->nvbo); + } + } + switch (drm_fb->depth) { case 8: format = NV50_EVO_CRTC_FB_DEPTH_8; @@ -526,15 +545,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y, return -EINVAL; } - ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; - - if (old_fb) { - struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb); - nouveau_bo_unpin(ofb->nvbo); - } - nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; @@ -685,14 +695,22 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false); nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false); - return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false); + return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false); } static int nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { - return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true); + return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false); +} + +static int +nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true); } static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { @@ -702,6 +720,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = { .mode_fixup = nv50_crtc_mode_fixup, .mode_set = nv50_crtc_mode_set, .mode_set_base = nv50_crtc_mode_set_base, + .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, .load_lut = nv50_crtc_lut_load, }; diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c index 03ad7ab14f09..1b9ce3021aa3 100644 --- a/drivers/gpu/drm/nouveau/nv50_cursor.c +++ b/drivers/gpu/drm/nouveau/nv50_cursor.c @@ -147,7 +147,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc) NV_DEBUG_KMS(dev, "\n"); nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0); - if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c index 1bc085962945..875414b09ade 100644 --- a/drivers/gpu/drm/nouveau/nv50_dac.c +++ b/drivers/gpu/drm/nouveau/nv50_dac.c @@ -79,7 +79,7 @@ 
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); - if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), + if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, @@ -130,7 +130,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode) NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); /* wait for it to be done */ - if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), + if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 612fa6d6a0cb..55c9663ef2bf 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -30,8 +30,22 @@ #include "nouveau_connector.h" #include "nouveau_fb.h" #include "nouveau_fbcon.h" +#include "nouveau_ramht.h" #include "drm_crtc_helper.h" +static inline int +nv50_sor_nr(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->chipset < 0x90 || + dev_priv->chipset == 0x92 || + dev_priv->chipset == 0xa0) + return 2; + + return 4; +} + static void nv50_evo_channel_del(struct nouveau_channel **pchan) { @@ -42,6 +56,7 @@ nv50_evo_channel_del(struct nouveau_channel **pchan) *pchan = NULL; nouveau_gpuobj_channel_takedown(chan); + nouveau_bo_unmap(chan->pushbuf_bo); nouveau_bo_ref(NULL, &chan->pushbuf_bo); if (chan->user) @@ -65,23 +80,23 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, return ret; obj->engine = NVOBJ_ENGINE_DISPLAY; - ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL); + nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); + nv_wo32(obj, 4, limit); + nv_wo32(obj, 8, offset); + nv_wo32(obj, 12, 0x00000000); + nv_wo32(obj, 16, 0x00000000); + if (dev_priv->card_type < NV_C0) + nv_wo32(obj, 20, 0x00010000); + else + nv_wo32(obj, 20, 0x00020000); + dev_priv->engine.instmem.flush(dev); + + ret = nouveau_ramht_insert(evo, name, obj); + nouveau_gpuobj_ref(NULL, &obj); if (ret) { - nouveau_gpuobj_del(dev, &obj); return ret; } - nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); - nv_wo32(dev, obj, 1, limit); - nv_wo32(dev, obj, 2, offset); - nv_wo32(dev, obj, 3, 0x00000000); - nv_wo32(dev, obj, 4, 0x00000000); - if (dev_priv->card_type < NV_C0) - nv_wo32(dev, obj, 5, 0x00010000); - else - nv_wo32(dev, obj, 5, 0x00020000); - dev_priv->engine.instmem.flush(dev); - return 0; } @@ -89,6 +104,7 @@ static int nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramht = NULL; struct nouveau_channel *chan; int ret; @@ -102,32 +118,35 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) chan->user_get = 4; chan->user_put = 0; - INIT_LIST_HEAD(&chan->ramht_refs); - - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); + ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); if (ret) { NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); nv50_evo_channel_del(pchan); return ret; } - ret 
= drm_mm_init(&chan->ramin_heap, - chan->ramin->gpuobj->im_pramin->start, 32768); + ret = drm_mm_init(&chan->ramin_heap, 0, 32768); if (ret) { NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); nv50_evo_channel_del(pchan); return ret; } - ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16, - 0, &chan->ramht); + ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); if (ret) { NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); nv50_evo_channel_del(pchan); return ret; } + ret = nouveau_ramht_new(dev, ramht, &chan->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + if (dev_priv->chipset != 0x50) { ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, 0, 0xffffffff); @@ -227,11 +246,11 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, 0x006101d0 + (i * 0x04), val); } /* SOR */ - for (i = 0; i < 4; i++) { + for (i = 0; i < nv50_sor_nr(dev); i++) { val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); nv_wr32(dev, 0x006101e0 + (i * 0x04), val); } - /* Something not yet in use, tv-out maybe. */ + /* EXT */ for (i = 0; i < 3; i++) { val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); nv_wr32(dev, 0x006101f0 + (i * 0x04), val); @@ -260,7 +279,7 @@ nv50_display_init(struct drm_device *dev) if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) { nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100); nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1); - if (!nv_wait(0x006194e8, 2, 0)) { + if (!nv_wait(dev, 0x006194e8, 2, 0)) { NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n"); NV_ERROR(dev, "0x6194e8 = 0x%08x\n", nv_rd32(dev, 0x6194e8)); @@ -291,7 +310,8 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03); - if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) { + if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), + 0x40000000, 0x40000000)) { NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); @@ -300,7 +320,7 @@ nv50_display_init(struct drm_device *dev) for (i = 0; i < 2; i++) { nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); - if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", @@ -310,7 +330,7 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); - if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { NV_ERROR(dev, "timeout: " @@ -321,16 +341,16 @@ nv50_display_init(struct drm_device *dev) } } - nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9); + nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); /* initialise fifo */ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), - ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) | + ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | NV50_PDISPLAY_CHANNEL_DMA_CB_VALID); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002); - if (!nv_wait(0x610200, 0x80000000, 0x00000000)) { + 
if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) { NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); return -EBUSY; @@ -370,7 +390,7 @@ nv50_display_init(struct drm_device *dev) BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1); OUT_RING(evo, 0); FIRE_RING(evo); - if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2)) + if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) NV_ERROR(dev, "evo pushbuf stalled\n"); /* enable clock change interrupts. */ @@ -424,7 +444,7 @@ static int nv50_display_disable(struct drm_device *dev) continue; nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask); - if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) { + if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) { NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == " "0x%08x\n", mask, mask); NV_ERROR(dev, "0x610024 = 0x%08x\n", @@ -434,14 +454,14 @@ static int nv50_display_disable(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0); - if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { + if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); } for (i = 0; i < 3; i++) { - if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i), + if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i); NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i, @@ -710,7 +730,7 @@ nv50_display_unk10_handler(struct drm_device *dev) or = i; } - for (i = 0; type == OUTPUT_ANY && i < 4; i++) { + for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { if (dev_priv->chipset < 0x90 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) @@ -841,7 +861,7 @@ nv50_display_unk20_handler(struct drm_device *dev) or = i; } - for (i = 0; type == OUTPUT_ANY && i < 4; i++) { + for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { if (dev_priv->chipset < 0x90 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index 32611bd30e6d..cd1988b15d2c 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c @@ -20,6 +20,7 @@ nv50_fb_init(struct drm_device *dev) case 0x50: nv_wr32(dev, 0x100c90, 0x0707ff); break; + case 0xa3: case 0xa5: case 0xa8: nv_wr32(dev, 0x100c90, 0x0d0fff); @@ -36,3 +37,42 @@ void nv50_fb_takedown(struct drm_device *dev) { } + +void +nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 trap[6], idx, chinst; + int i, ch; + + idx = nv_rd32(dev, 0x100c90); + if (!(idx & 0x80000000)) + return; + idx &= 0x00ffffff; + + for (i = 0; i < 6; i++) { + nv_wr32(dev, 0x100c90, idx | i << 24); + trap[i] = nv_rd32(dev, 0x100c94); + } + nv_wr32(dev, 0x100c90, idx | 0x80000000); + + if (!display) + return; + + chinst = (trap[2] << 16) | trap[1]; + for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { + struct nouveau_channel *chan = dev_priv->fifos[ch]; + + if (!chan || !chan->ramin) + continue; + + if (chinst == chan->ramin->vinst >> 12) + break; + } + + NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " + "channel %d (0x%08x)\n", + name, (trap[5] & 0x100 ? 
"read" : "write"), + trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, + trap[0], ch, chinst); +} diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6bf025c6fc6f..6dcf048eddbc 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -1,6 +1,7 @@ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_ramht.h" #include "nouveau_fbcon.h" void @@ -193,7 +194,8 @@ nv50_fbcon_accel_init(struct fb_info *info) if (ret) return ret; - ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL); + ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d); + nouveau_gpuobj_ref(NULL, &eng2d); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index fb0281ae8f90..a46a961102f3 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -27,13 +27,14 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" +#include "nouveau_ramht.h" static void nv50_fifo_playlist_update(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; - struct nouveau_gpuobj_ref *cur; + struct nouveau_gpuobj *cur; int i, nr; NV_DEBUG(dev, "\n"); @@ -43,12 +44,14 @@ nv50_fifo_playlist_update(struct drm_device *dev) /* We never schedule channel 0 or 127 */ for (i = 1, nr = 0; i < 127; i++) { - if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) - nv_wo32(dev, cur->gpuobj, nr++, i); + if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { + nv_wo32(cur, (nr * 4), i); + nr++; + } } dev_priv->engine.instmem.flush(dev); - nv_wr32(dev, 0x32f4, cur->instance >> 12); + nv_wr32(dev, 0x32f4, cur->vinst >> 12); nv_wr32(dev, 0x32ec, nr); nv_wr32(dev, 0x2500, 0x101); } @@ -63,9 +66,9 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel) NV_DEBUG(dev, "ch%d\n", channel); if (dev_priv->chipset == 0x50) - inst = chan->ramfc->instance >> 12; + inst = chan->ramfc->vinst >> 12; else - inst = chan->ramfc->instance >> 8; + inst = chan->ramfc->vinst >> 8; nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); @@ -163,19 +166,19 @@ nv50_fifo_init(struct drm_device *dev) goto just_reset; } - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, - &pfifo->playlist[0]); + ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, + &pfifo->playlist[0]); if (ret) { NV_ERROR(dev, "error creating playlist 0: %d\n", ret); return ret; } - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, - &pfifo->playlist[1]); + ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, + &pfifo->playlist[1]); if (ret) { - nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); NV_ERROR(dev, "error creating playlist 1: %d\n", ret); return ret; } @@ -203,8 +206,8 @@ nv50_fifo_takedown(struct drm_device *dev) if (!pfifo->playlist[0]) return; - nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); - nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); } int @@ -226,59 +229,54 @@ nv50_fifo_create_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); if (dev_priv->chipset == 0x50) { - uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start; - uint32_t ramin_voffset 
= chan->ramin->gpuobj->im_backing_start; - - ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset, - 0x100, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, &ramfc, + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, + chan->ramin->vinst, 0x100, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); if (ret) return ret; - ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400, - ramin_voffset + 0x0400, 4096, - 0, NULL, &chan->cache); + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400, + chan->ramin->vinst + 0x0400, + 4096, 0, &chan->cache); if (ret) return ret; } else { - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, - NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, - &chan->ramfc); + ret = nouveau_gpuobj_new(dev, chan, 0x100, 256, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &chan->ramfc); if (ret) return ret; - ramfc = chan->ramfc->gpuobj; - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024, - 0, &chan->cache); + ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, + 0, &chan->cache); if (ret) return ret; } + ramfc = chan->ramfc; spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); - nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ | - (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->instance >> 4)); - nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); - nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); - nv_wo32(dev, ramfc, 0x40/4, 0x00000000); - nv_wo32(dev, ramfc, 0x7c/4, 0x30000001); - nv_wo32(dev, ramfc, 0x78/4, 0x00000000); - nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078); - nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base + - chan->dma.ib_base * 4); - nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16); + nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); + nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | + (4 << 24) /* SEARCH_FULL */ | + (chan->ramht->gpuobj->cinst >> 4)); + nv_wo32(ramfc, 0x44, 0x2101ffff); + nv_wo32(ramfc, 0x60, 0x7fffffff); + nv_wo32(ramfc, 0x40, 0x00000000); + nv_wo32(ramfc, 0x7c, 0x30000001); + nv_wo32(ramfc, 0x78, 0x00000000); + nv_wo32(ramfc, 0x3c, 0x403f6078); + nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4); + nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16); if (dev_priv->chipset != 0x50) { - nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); - nv_wo32(dev, chan->ramin->gpuobj, 1, - chan->ramfc->instance >> 8); + nv_wo32(chan->ramin, 0, chan->id); + nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8); - nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10); - nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12); + nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10); + nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12); } dev_priv->engine.instmem.flush(dev); @@ -293,12 +291,13 @@ void nv50_fifo_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; - struct nouveau_gpuobj_ref *ramfc = chan->ramfc; + struct nouveau_gpuobj *ramfc = NULL; NV_DEBUG(dev, "ch%d\n", chan->id); /* This will ensure the channel is seen as disabled. 
*/ - chan->ramfc = NULL; + nouveau_gpuobj_ref(chan->ramfc, &ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); nv50_fifo_channel_disable(dev, chan->id); /* Dummy channel, also used on ch 127 */ @@ -306,8 +305,8 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) nv50_fifo_channel_disable(dev, 127); nv50_fifo_playlist_update(dev); - nouveau_gpuobj_ref_del(dev, &ramfc); - nouveau_gpuobj_ref_del(dev, &chan->cache); + nouveau_gpuobj_ref(NULL, &ramfc); + nouveau_gpuobj_ref(NULL, &chan->cache); } int @@ -315,63 +314,63 @@ nv50_fifo_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; - struct nouveau_gpuobj *cache = chan->cache->gpuobj; + struct nouveau_gpuobj *ramfc = chan->ramfc; + struct nouveau_gpuobj *cache = chan->cache; int ptr, cnt; NV_DEBUG(dev, "ch%d\n", chan->id); - nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4)); - nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4)); - nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4)); - nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4)); - nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4)); - nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4)); - nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4)); - nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4)); - nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4)); - nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4)); - nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4)); - nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4)); - nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4)); - nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4)); - nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4)); - nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4)); - nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4)); - nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4)); - nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4)); - nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4)); - nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4)); - nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4)); - nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4)); - nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4)); - nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4)); - nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4)); - nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4)); - nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4)); - nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4)); - nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4)); - nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4)); - nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4)); - nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4)); + nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00)); + nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04)); + nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08)); + nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c)); + nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10)); + nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14)); + nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18)); + nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c)); + nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20)); + nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24)); + nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28)); + nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c)); + nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30)); + nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34)); + nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38)); + nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c)); + nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40)); + nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44)); + nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48)); + 
nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c)); + nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50)); + nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54)); + nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58)); + nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c)); + nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60)); + nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64)); + nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68)); + nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c)); + nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70)); + nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74)); + nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78)); + nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c)); + nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80)); - cnt = nv_ro32(dev, ramfc, 0x84/4); + cnt = nv_ro32(ramfc, 0x84); for (ptr = 0; ptr < cnt; ptr++) { nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr), - nv_ro32(dev, cache, (ptr * 2) + 0)); + nv_ro32(cache, (ptr * 8) + 0)); nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr), - nv_ro32(dev, cache, (ptr * 2) + 1)); + nv_ro32(cache, (ptr * 8) + 4)); } nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2); nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); /* guessing that all the 0x34xx regs aren't on NV50 */ if (dev_priv->chipset != 0x50) { - nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4)); - nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4)); - nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4)); - nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4)); - nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4)); + nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88)); + nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c)); + nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90)); + nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94)); + nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98)); } nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); @@ -399,62 +398,63 @@ nv50_fifo_unload_context(struct drm_device *dev) return -EINVAL; } NV_DEBUG(dev, "ch%d\n", chan->id); - ramfc = chan->ramfc->gpuobj; - cache = chan->cache->gpuobj; + ramfc = chan->ramfc; + cache = chan->cache; - nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330)); - nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334)); - nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240)); - nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320)); - nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244)); - nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328)); - nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368)); - nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c)); - nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370)); - nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374)); - nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378)); - nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c)); - nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228)); - nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364)); - nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0)); - nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224)); - nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c)); - nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044)); - nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c)); - nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234)); - nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340)); - nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344)); - nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280)); - nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254)); - nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260)); - nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264)); - nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268)); - nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c)); - nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4)); - nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248)); - nv_wo32(dev, ramfc, 
0x78/4, nv_rd32(dev, 0x2088)); - nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058)); - nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210)); + nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330)); + nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334)); + nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240)); + nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320)); + nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244)); + nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328)); + nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368)); + nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c)); + nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370)); + nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374)); + nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378)); + nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c)); + nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228)); + nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364)); + nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0)); + nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224)); + nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c)); + nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044)); + nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c)); + nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234)); + nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340)); + nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344)); + nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280)); + nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254)); + nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260)); + nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264)); + nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268)); + nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c)); + nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4)); + nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248)); + nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088)); + nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058)); + nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210)); put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2; get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2; ptr = 0; while (put != get) { - nv_wo32(dev, cache, ptr++, - nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get))); - nv_wo32(dev, cache, ptr++, - nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get))); + nv_wo32(cache, ptr + 0, + nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get))); + nv_wo32(cache, ptr + 4, + nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get))); get = (get + 1) & 0x1ff; + ptr += 8; } /* guessing that all the 0x34xx regs aren't on NV50 */ if (dev_priv->chipset != 0x50) { - nv_wo32(dev, ramfc, 0x84/4, ptr >> 1); - nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c)); - nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400)); - nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404)); - nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408)); - nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410)); + nv_wo32(ramfc, 0x84, ptr >> 3); + nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c)); + nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400)); + nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404)); + nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408)); + nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410)); } dev_priv->engine.instmem.flush(dev); diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 1413028e1580..cbf5ae2f67d4 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -27,7 +27,7 @@ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" - +#include "nouveau_ramht.h" #include "nouveau_grctx.h" static void @@ -181,7 +181,7 @@ nv50_graph_channel(struct drm_device *dev) /* Be sure we're not in the middle of a context switch or bad things * will happen, such as unloading the wrong pgraph context. 
*/ - if (!nv_wait(0x400300, 0x00000001, 0x00000000)) + if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) NV_ERROR(dev, "Ctxprog is still running\n"); inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); @@ -192,7 +192,7 @@ nv50_graph_channel(struct drm_device *dev) for (i = 0; i < dev_priv->engine.fifo.channels; i++) { struct nouveau_channel *chan = dev_priv->fifos[i]; - if (chan && chan->ramin && chan->ramin->instance == inst) + if (chan && chan->ramin && chan->ramin->vinst == inst) return chan; } @@ -204,36 +204,34 @@ nv50_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; - struct nouveau_gpuobj *obj; + struct nouveau_gpuobj *ramin = chan->ramin; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_grctx ctx = {}; int hdr, ret; NV_DEBUG(dev, "ch%d\n", chan->id); - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, - 0x1000, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); + ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); if (ret) return ret; - obj = chan->ramin_grctx->gpuobj; hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; - nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); - nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + - pgraph->grctx_size - 1); - nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); - nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); - nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); - nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000); + nv_wo32(ramin, hdr + 0x00, 0x00190002); + nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst + + pgraph->grctx_size - 1); + nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst); + nv_wo32(ramin, hdr + 0x0c, 0); + nv_wo32(ramin, hdr + 0x10, 0); + nv_wo32(ramin, hdr + 0x14, 0x00010000); ctx.dev = chan->dev; ctx.mode = NOUVEAU_GRCTX_VALS; - ctx.data = obj; + ctx.data = chan->ramin_grctx; nv50_grctx_init(&ctx); - nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12); + nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); dev_priv->engine.instmem.flush(dev); return 0; @@ -248,14 +246,14 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - if (!chan->ramin || !chan->ramin->gpuobj) + if (!chan->ramin) return; for (i = hdr; i < hdr + 24; i += 4) - nv_wo32(dev, chan->ramin->gpuobj, i/4, 0); + nv_wo32(chan->ramin, i, 0); dev_priv->engine.instmem.flush(dev); - nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } static int @@ -282,7 +280,7 @@ nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst) int nv50_graph_load_context(struct nouveau_channel *chan) { - uint32_t inst = chan->ramin->instance >> 12; + uint32_t inst = chan->ramin->vinst >> 12; NV_DEBUG(chan->dev, "ch%d\n", chan->id); return nv50_graph_do_load_context(chan->dev, inst); @@ -327,15 +325,16 @@ static int nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, int mthd, uint32_t data) { - struct nouveau_gpuobj_ref *ref = NULL; + struct nouveau_gpuobj *gpuobj; - if (nouveau_gpuobj_ref_find(chan, data, &ref)) + gpuobj = nouveau_ramht_find(chan, data); + if (!gpuobj) return -ENOENT; - if (nouveau_notifier_offset(ref->gpuobj, NULL)) + if (nouveau_notifier_offset(gpuobj, NULL)) return -EINVAL; - chan->nvsw.vblsem = ref->gpuobj; + chan->nvsw.vblsem = gpuobj; 
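/*
 * Annotation, not part of the original patch: nouveau_ramht_find() hands
 * back the bound gpuobj itself (or NULL), replacing the old
 * nouveau_gpuobj_ref_find() out-parameter wrapper, so callers keep a bare
 * object pointer as done here.  A minimal sketch of the assumed calling
 * pattern, with a hypothetical handle value:
 *
 *	struct nouveau_gpuobj *obj = nouveau_ramht_find(chan, 0xbeef0301);
 *	if (!obj)
 *		return -ENOENT;		(hypothetical: handle not in RAMHT)
 */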
chan->nvsw.vblsem_offset = ~0; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index 42a8fb20c1e6..336aab2a24a6 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c @@ -103,6 +103,9 @@ #include "nouveau_drv.h" #include "nouveau_grctx.h" +#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) +#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac) + /* * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's * the GPU itself that does context-switching, but it needs a special @@ -182,6 +185,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx) case 0xa8: case 0xaa: case 0xac: + case 0xaf: break; default: NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for " @@ -267,6 +271,9 @@ nv50_grctx_init(struct nouveau_grctx *ctx) * registers to save/restore and the default values for them. */ +static void +nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx); + static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx) { @@ -286,7 +293,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, 0x400840, 0xffe806a8); } gr_def(ctx, 0x400844, 0x00000002); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + if (IS_NVA3F(dev_priv->chipset)) gr_def(ctx, 0x400894, 0x00001000); gr_def(ctx, 0x4008e8, 0x00000003); gr_def(ctx, 0x4008ec, 0x00001000); @@ -299,13 +306,15 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset >= 0xa0) cp_ctx(ctx, 0x400b00, 0x1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + if (IS_NVA3F(dev_priv->chipset)) { cp_ctx(ctx, 0x400b10, 0x1); gr_def(ctx, 0x400b10, 0x0001629d); cp_ctx(ctx, 0x400b20, 0x1); gr_def(ctx, 0x400b20, 0x0001629d); } + nv50_graph_construct_mmio_ddata(ctx); + /* 0C00: VFETCH */ cp_ctx(ctx, 0x400c08, 0x2); gr_def(ctx, 0x400c08, 0x0000fe0c); @@ -314,7 +323,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) { cp_ctx(ctx, 0x401008, 0x4); gr_def(ctx, 0x401014, 0x00001000); - } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) { + } else if (!IS_NVA3F(dev_priv->chipset)) { cp_ctx(ctx, 0x401008, 0x5); gr_def(ctx, 0x401018, 0x00001000); } else { @@ -368,10 +377,13 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xa3: case 0xa5: case 0xa8: + case 0xaf: gr_def(ctx, 0x401c00, 0x142500df); break; } + /* 2000 */ + /* 2400 */ cp_ctx(ctx, 0x402400, 0x1); if (dev_priv->chipset == 0x50) @@ -380,12 +392,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) cp_ctx(ctx, 0x402408, 0x2); gr_def(ctx, 0x402408, 0x00000600); - /* 2800 */ + /* 2800: CSCHED */ cp_ctx(ctx, 0x402800, 0x1); if (dev_priv->chipset == 0x50) gr_def(ctx, 0x402800, 0x00000006); - /* 2C00 */ + /* 2C00: ZCULL */ cp_ctx(ctx, 0x402c08, 0x6); if (dev_priv->chipset != 0x50) gr_def(ctx, 0x402c14, 0x01000000); @@ -396,23 +408,23 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) cp_ctx(ctx, 0x402ca0, 0x2); if (dev_priv->chipset < 0xa0) gr_def(ctx, 0x402ca0, 0x00000400); - else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) + else if (!IS_NVA3F(dev_priv->chipset)) gr_def(ctx, 0x402ca0, 0x00000800); else gr_def(ctx, 0x402ca0, 0x00000400); cp_ctx(ctx, 0x402cac, 0x4); - /* 3000 */ + /* 3000: ENG2D */ cp_ctx(ctx, 0x403004, 0x1); gr_def(ctx, 0x403004, 0x00000001); - /* 3404 */ + /* 3400 */ if (dev_priv->chipset >= 0xa0) { cp_ctx(ctx, 0x403404, 0x1); gr_def(ctx, 0x403404, 0x00000001); } - /* 5000 */ + /* 5000: CCACHE */ cp_ctx(ctx, 0x405000, 0x1); switch 
(dev_priv->chipset) { case 0x50: @@ -425,6 +437,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xa8: case 0xaa: case 0xac: + case 0xaf: gr_def(ctx, 0x405000, 0x000e0080); break; case 0x86: @@ -441,210 +454,6 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) cp_ctx(ctx, 0x405024, 0x1); cp_ctx(ctx, 0x40502c, 0x1); - /* 5400 or maybe 4800 */ - if (dev_priv->chipset == 0x50) { - offset = 0x405400; - cp_ctx(ctx, 0x405400, 0xea); - } else if (dev_priv->chipset < 0x94) { - offset = 0x405400; - cp_ctx(ctx, 0x405400, 0xcb); - } else if (dev_priv->chipset < 0xa0) { - offset = 0x405400; - cp_ctx(ctx, 0x405400, 0xcc); - } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - offset = 0x404800; - cp_ctx(ctx, 0x404800, 0xda); - } else { - offset = 0x405400; - cp_ctx(ctx, 0x405400, 0xd4); - } - gr_def(ctx, offset + 0x0c, 0x00000002); - gr_def(ctx, offset + 0x10, 0x00000001); - if (dev_priv->chipset >= 0x94) - offset += 4; - gr_def(ctx, offset + 0x1c, 0x00000001); - gr_def(ctx, offset + 0x20, 0x00000100); - gr_def(ctx, offset + 0x38, 0x00000002); - gr_def(ctx, offset + 0x3c, 0x00000001); - gr_def(ctx, offset + 0x40, 0x00000001); - gr_def(ctx, offset + 0x50, 0x00000001); - gr_def(ctx, offset + 0x54, 0x003fffff); - gr_def(ctx, offset + 0x58, 0x00001fff); - gr_def(ctx, offset + 0x60, 0x00000001); - gr_def(ctx, offset + 0x64, 0x00000001); - gr_def(ctx, offset + 0x6c, 0x00000001); - gr_def(ctx, offset + 0x70, 0x00000001); - gr_def(ctx, offset + 0x74, 0x00000001); - gr_def(ctx, offset + 0x78, 0x00000004); - gr_def(ctx, offset + 0x7c, 0x00000001); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - offset += 4; - gr_def(ctx, offset + 0x80, 0x00000001); - gr_def(ctx, offset + 0x84, 0x00000001); - gr_def(ctx, offset + 0x88, 0x00000007); - gr_def(ctx, offset + 0x8c, 0x00000001); - gr_def(ctx, offset + 0x90, 0x00000007); - gr_def(ctx, offset + 0x94, 0x00000001); - gr_def(ctx, offset + 0x98, 0x00000001); - gr_def(ctx, offset + 0x9c, 0x00000001); - if (dev_priv->chipset == 0x50) { - gr_def(ctx, offset + 0xb0, 0x00000001); - gr_def(ctx, offset + 0xb4, 0x00000001); - gr_def(ctx, offset + 0xbc, 0x00000001); - gr_def(ctx, offset + 0xc0, 0x0000000a); - gr_def(ctx, offset + 0xd0, 0x00000040); - gr_def(ctx, offset + 0xd8, 0x00000002); - gr_def(ctx, offset + 0xdc, 0x00000100); - gr_def(ctx, offset + 0xe0, 0x00000001); - gr_def(ctx, offset + 0xe4, 0x00000100); - gr_def(ctx, offset + 0x100, 0x00000001); - gr_def(ctx, offset + 0x124, 0x00000004); - gr_def(ctx, offset + 0x13c, 0x00000001); - gr_def(ctx, offset + 0x140, 0x00000100); - gr_def(ctx, offset + 0x148, 0x00000001); - gr_def(ctx, offset + 0x154, 0x00000100); - gr_def(ctx, offset + 0x158, 0x00000001); - gr_def(ctx, offset + 0x15c, 0x00000100); - gr_def(ctx, offset + 0x164, 0x00000001); - gr_def(ctx, offset + 0x170, 0x00000100); - gr_def(ctx, offset + 0x174, 0x00000001); - gr_def(ctx, offset + 0x17c, 0x00000001); - gr_def(ctx, offset + 0x188, 0x00000002); - gr_def(ctx, offset + 0x190, 0x00000001); - gr_def(ctx, offset + 0x198, 0x00000001); - gr_def(ctx, offset + 0x1ac, 0x00000003); - offset += 0xd0; - } else { - gr_def(ctx, offset + 0xb0, 0x00000001); - gr_def(ctx, offset + 0xb4, 0x00000100); - gr_def(ctx, offset + 0xbc, 0x00000001); - gr_def(ctx, offset + 0xc8, 0x00000100); - gr_def(ctx, offset + 0xcc, 0x00000001); - gr_def(ctx, offset + 0xd0, 0x00000100); - gr_def(ctx, offset + 0xd8, 0x00000001); - gr_def(ctx, offset + 0xe4, 0x00000100); - } - gr_def(ctx, offset + 0xf8, 0x00000004); - gr_def(ctx, offset + 0xfc, 0x00000070); - 
gr_def(ctx, offset + 0x100, 0x00000080); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - offset += 4; - gr_def(ctx, offset + 0x114, 0x0000000c); - if (dev_priv->chipset == 0x50) - offset -= 4; - gr_def(ctx, offset + 0x11c, 0x00000008); - gr_def(ctx, offset + 0x120, 0x00000014); - if (dev_priv->chipset == 0x50) { - gr_def(ctx, offset + 0x124, 0x00000026); - offset -= 0x18; - } else { - gr_def(ctx, offset + 0x128, 0x00000029); - gr_def(ctx, offset + 0x12c, 0x00000027); - gr_def(ctx, offset + 0x130, 0x00000026); - gr_def(ctx, offset + 0x134, 0x00000008); - gr_def(ctx, offset + 0x138, 0x00000004); - gr_def(ctx, offset + 0x13c, 0x00000027); - } - gr_def(ctx, offset + 0x148, 0x00000001); - gr_def(ctx, offset + 0x14c, 0x00000002); - gr_def(ctx, offset + 0x150, 0x00000003); - gr_def(ctx, offset + 0x154, 0x00000004); - gr_def(ctx, offset + 0x158, 0x00000005); - gr_def(ctx, offset + 0x15c, 0x00000006); - gr_def(ctx, offset + 0x160, 0x00000007); - gr_def(ctx, offset + 0x164, 0x00000001); - gr_def(ctx, offset + 0x1a8, 0x000000cf); - if (dev_priv->chipset == 0x50) - offset -= 4; - gr_def(ctx, offset + 0x1d8, 0x00000080); - gr_def(ctx, offset + 0x1dc, 0x00000004); - gr_def(ctx, offset + 0x1e0, 0x00000004); - if (dev_priv->chipset == 0x50) - offset -= 4; - else - gr_def(ctx, offset + 0x1e4, 0x00000003); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - gr_def(ctx, offset + 0x1ec, 0x00000003); - offset += 8; - } - gr_def(ctx, offset + 0x1e8, 0x00000001); - if (dev_priv->chipset == 0x50) - offset -= 4; - gr_def(ctx, offset + 0x1f4, 0x00000012); - gr_def(ctx, offset + 0x1f8, 0x00000010); - gr_def(ctx, offset + 0x1fc, 0x0000000c); - gr_def(ctx, offset + 0x200, 0x00000001); - gr_def(ctx, offset + 0x210, 0x00000004); - gr_def(ctx, offset + 0x214, 0x00000002); - gr_def(ctx, offset + 0x218, 0x00000004); - if (dev_priv->chipset >= 0xa0) - offset += 4; - gr_def(ctx, offset + 0x224, 0x003fffff); - gr_def(ctx, offset + 0x228, 0x00001fff); - if (dev_priv->chipset == 0x50) - offset -= 0x20; - else if (dev_priv->chipset >= 0xa0) { - gr_def(ctx, offset + 0x250, 0x00000001); - gr_def(ctx, offset + 0x254, 0x00000001); - gr_def(ctx, offset + 0x258, 0x00000002); - offset += 0x10; - } - gr_def(ctx, offset + 0x250, 0x00000004); - gr_def(ctx, offset + 0x254, 0x00000014); - gr_def(ctx, offset + 0x258, 0x00000001); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - offset += 4; - gr_def(ctx, offset + 0x264, 0x00000002); - if (dev_priv->chipset >= 0xa0) - offset += 8; - gr_def(ctx, offset + 0x270, 0x00000001); - gr_def(ctx, offset + 0x278, 0x00000002); - gr_def(ctx, offset + 0x27c, 0x00001000); - if (dev_priv->chipset == 0x50) - offset -= 0xc; - else { - gr_def(ctx, offset + 0x280, 0x00000e00); - gr_def(ctx, offset + 0x284, 0x00001000); - gr_def(ctx, offset + 0x288, 0x00001e00); - } - gr_def(ctx, offset + 0x290, 0x00000001); - gr_def(ctx, offset + 0x294, 0x00000001); - gr_def(ctx, offset + 0x298, 0x00000001); - gr_def(ctx, offset + 0x29c, 0x00000001); - gr_def(ctx, offset + 0x2a0, 0x00000001); - gr_def(ctx, offset + 0x2b0, 0x00000200); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - gr_def(ctx, offset + 0x2b4, 0x00000200); - offset += 4; - } - if (dev_priv->chipset < 0xa0) { - gr_def(ctx, offset + 0x2b8, 0x00000001); - gr_def(ctx, offset + 0x2bc, 0x00000070); - gr_def(ctx, offset + 0x2c0, 0x00000080); - gr_def(ctx, offset + 0x2cc, 0x00000001); - gr_def(ctx, offset + 0x2d0, 0x00000070); - gr_def(ctx, offset + 0x2d4, 0x00000080); - } else { - gr_def(ctx, offset + 0x2b8, 
0x00000001); - gr_def(ctx, offset + 0x2bc, 0x000000f0); - gr_def(ctx, offset + 0x2c0, 0x000000ff); - gr_def(ctx, offset + 0x2cc, 0x00000001); - gr_def(ctx, offset + 0x2d0, 0x000000f0); - gr_def(ctx, offset + 0x2d4, 0x000000ff); - gr_def(ctx, offset + 0x2dc, 0x00000009); - offset += 4; - } - gr_def(ctx, offset + 0x2e4, 0x00000001); - gr_def(ctx, offset + 0x2e8, 0x000000cf); - gr_def(ctx, offset + 0x2f0, 0x00000001); - gr_def(ctx, offset + 0x300, 0x000000cf); - gr_def(ctx, offset + 0x308, 0x00000002); - gr_def(ctx, offset + 0x310, 0x00000001); - gr_def(ctx, offset + 0x318, 0x00000001); - gr_def(ctx, offset + 0x320, 0x000000cf); - gr_def(ctx, offset + 0x324, 0x000000cf); - gr_def(ctx, offset + 0x328, 0x00000001); - /* 6000? */ if (dev_priv->chipset == 0x50) cp_ctx(ctx, 0x4063e0, 0x1); @@ -661,7 +470,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, 0x406818, 0x00000f80); else gr_def(ctx, 0x406818, 0x00001f80); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + if (IS_NVA3F(dev_priv->chipset)) gr_def(ctx, 0x40681c, 0x00000030); cp_ctx(ctx, 0x406830, 0x3); } @@ -706,7 +515,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) cp_ctx(ctx, 0x407094 + (i<<8), 1); - else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + else if (!IS_NVA3F(dev_priv->chipset)) cp_ctx(ctx, 0x407094 + (i<<8), 3); else { cp_ctx(ctx, 0x407094 + (i<<8), 4); @@ -799,6 +608,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) case 0xa8: case 0xaa: case 0xac: + case 0xaf: gr_def(ctx, offset + 0x1c, 0x300c0000); break; } @@ -825,7 +635,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, base + 0x304, 0x00007070); else if (dev_priv->chipset < 0xa0) gr_def(ctx, base + 0x304, 0x00027070); - else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + else if (!IS_NVA3F(dev_priv->chipset)) gr_def(ctx, base + 0x304, 0x01127070); else gr_def(ctx, base + 0x304, 0x05127070); @@ -849,7 +659,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) { cp_ctx(ctx, base + 0x340, 9); offset = base + 0x340; - } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + } else if (!IS_NVA3F(dev_priv->chipset)) { cp_ctx(ctx, base + 0x33c, 0xb); offset = base + 0x344; } else { @@ -880,7 +690,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, offset + 0x0, 0x000001f0); gr_def(ctx, offset + 0x4, 0x00000001); gr_def(ctx, offset + 0x8, 0x00000003); - if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa) + if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset)) gr_def(ctx, offset + 0xc, 0x00008000); gr_def(ctx, offset + 0x14, 0x00039e00); cp_ctx(ctx, offset + 0x1c, 2); @@ -892,7 +702,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) if (dev_priv->chipset >= 0xa0) { cp_ctx(ctx, base + 0x54c, 2); - if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + if (!IS_NVA3F(dev_priv->chipset)) gr_def(ctx, base + 0x54c, 0x003fe006); else gr_def(ctx, base + 0x54c, 0x003fe007); @@ -948,6 +758,336 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) } } +static void +dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { + int i; + if (val && ctx->mode == NOUVEAU_GRCTX_VALS) + for (i = 0; i < num; i++) + nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val); + ctx->ctxvals_pos += num; +} + +static void +nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int base, num; + base = 
ctx->ctxvals_pos; + + /* tesla state */ + dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK135C */ + + /* SRC_TIC state */ + dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */ + dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */ + dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */ + if (dev_priv->chipset >= 0x94) + dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */ + dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */ + dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */ + + /* turing state */ + dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */ + dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */ + dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ + dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ + dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */ + dd_emit(ctx, 1, 1); /* 00000001 LANES32 */ + dd_emit(ctx, 1, 0); /* 000000ff UNK370 */ + dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */ + dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */ + dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */ + dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ + dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */ + dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */ + dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */ + dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */ + dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */ + dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */ + if (IS_NVA3F(dev_priv->chipset)) + dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */ + dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */ + dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */ + dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */ + dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */ + dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */ + dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */ + dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */ + dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */ + + /* compat 2d state */ + if (dev_priv->chipset == 0x50) { + dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */ + + dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */ + + dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */ + dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */ + + dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */ + dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */ + dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */ + dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */ + dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */ + dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */ + + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect 
CLIP_D_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */ + dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */ + dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */ + + dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */ + dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff iifc LUT_OFFSET */ + dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */ + } + + /* m2mf state */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */ + dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */ + dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */ + dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */ + dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */ + dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_IN */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */ + dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */ + dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */ + dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */ + dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */ + + /* more compat 2d state */ + if (dev_priv->chipset == 0x50) { + dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */ + + dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */ + + dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */ + dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */ + dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */ + dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */ + dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */ + dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */ + dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */ + dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */ + dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */ + + dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */ + } + + /* tesla state */ + dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */ + dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */ + dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */ + dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */ + dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */ + if (IS_NVA3F(dev_priv->chipset)) { + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */ + } else { + dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */ + } + dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */ + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */ + dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */ + dd_emit(ctx, 1, 0x14); /* 
000000ff SEMANTIC_COLOR.FFC0_ID */ + if (dev_priv->chipset == 0x50) { + dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */ + dd_emit(ctx, 1, 0); /* 00000001 */ + } else { + dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */ + dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */ + dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */ + dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + dd_emit(ctx, 1, 8); /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */ + dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */ + dd_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */ + } + dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */ + dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */ + dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */ + dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */ + dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */ + dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */ + dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */ + dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */ + dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */ + dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */ + dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ + dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */ + dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */ + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 3, 0); /* 1, 1, 1 */ + else + dd_emit(ctx, 2, 0); /* 1, 1 */ + dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */ + dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/ + dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + if (IS_NVA3F(dev_priv->chipset)) { + dd_emit(ctx, 1, 3); /* 00000003 */ + dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */ + } + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */ + dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */ + dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */ + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */ + dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */ + dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */ + dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */ + dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */ + dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */ + dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ + dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ + dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + if (dev_priv->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* ffffffff */ + dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */ + dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */ + dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ + dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/ + if (dev_priv->chipset != 0x50) + dd_emit(ctx, 8, 0); /* 00000001 */ + if (dev_priv->chipset >= 0xa0) { + dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */ + dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */ + dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */ + dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */ + } + dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */ + dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + dd_emit(ctx, 1, 0); /* 0000000f 
VP_TEXTURES_LOG2 */ + dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */ + if (IS_NVA3F(dev_priv->chipset)) + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */ + if (dev_priv->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */ + dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ + if (dev_priv->chipset >= 0xa0) + dd_emit(ctx, 1, 0); /* 00000003 */ + dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */ + dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */ + dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */ + dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */ + dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */ + if (dev_priv->chipset != 0x50) { + dd_emit(ctx, 1, 0xe00); /* 7fff */ + dd_emit(ctx, 1, 0x1000); /* 7fff */ + dd_emit(ctx, 1, 0x1e00); /* 7fff */ + } + dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */ + dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */ + dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */ + dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? */ + dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */ + dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */ + dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */ + if (IS_NVA3F(dev_priv->chipset)) + dd_emit(ctx, 1, 0x200); + dd_emit(ctx, 1, 0); /* 00000001 */ + if (dev_priv->chipset < 0xa0) { + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0x70); /* 000000ff */ + dd_emit(ctx, 1, 0x80); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0x70); /* 000000ff */ + dd_emit(ctx, 1, 0x80); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + } else { + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0xf0); /* 000000ff */ + dd_emit(ctx, 1, 0xff); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 00000001 */ + dd_emit(ctx, 1, 1); /* 00000001 */ + dd_emit(ctx, 1, 0xf0); /* 000000ff */ + dd_emit(ctx, 1, 0xff); /* 000000ff */ + dd_emit(ctx, 1, 0); /* 000000ff */ + dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */ + } + + /* eng2d state */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */ + dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */ + dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */ + dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */ + dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */ + dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */ + dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */ + dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */ + dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */ + dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */ + + num = ctx->ctxvals_pos - base; + 
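At this point the constructor has just measured how many slots its dd_emit() calls consumed; the code resumes below by rewinding ctxvals_pos to base and mapping the whole range with a single cp_ctx() call. A condensed sketch of this emit-then-map idiom, with simplified stand-in types (the real struct nouveau_grctx carries more state, and dd_emit() stores through nv_wo32() at byte offset 4 * slot):

	/* sketch only: stand-in types, not the kernel structures */
	struct grctx_sketch {
		int filling;		/* nonzero on the value-filling pass */
		int ctxvals_pos;	/* cursor into the context image */
		unsigned int *data;	/* stand-in for ctx->data */
	};

	static void emit_sketch(struct grctx_sketch *g, int num, unsigned int val)
	{
		int i;

		/* like dd_emit(): store nonzero defaults only on the filling
		 * pass, but always advance, so both passes agree on layout */
		if (val && g->filling)
			for (i = 0; i < num; i++)
				g->data[g->ctxvals_pos + i] = val;
		g->ctxvals_pos += num;
	}

	static void build_sketch(struct grctx_sketch *g)
	{
		int base = g->ctxvals_pos, num;

		emit_sketch(g, 1, 0);		/* one state word */
		emit_sketch(g, 4, 0xcf);	/* four more defaults */

		num = g->ctxvals_pos - base;	/* slots consumed */
		g->ctxvals_pos = base;		/* rewind, then hand the whole
						 * range to cp_ctx() in one go */
	}

Because only the cursor moves during the counting pass, the same walk serves both to size the register range and to fill in its default values.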
ctx->ctxvals_pos = base; + if (IS_NVA3F(dev_priv->chipset)) + cp_ctx(ctx, 0x404800, num); + else + cp_ctx(ctx, 0x405400, num); +} + /* * xfer areas. These are a pain. * @@ -990,28 +1130,33 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) * without the help of ctxprog. */ -static inline void +static void xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { int i; if (val && ctx->mode == NOUVEAU_GRCTX_VALS) for (i = 0; i < num; i++) - nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val); + nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val); ctx->ctxvals_pos += num << 3; } /* Gene declarations... */ +static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx); static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx); -static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx); static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx); static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx); @@ -1030,102 +1175,32 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) { /* Strand 0 */ ctx->ctxvals_pos = offset; - switch (dev_priv->chipset) { - case 0x50: - xf_emit(ctx, 0x99, 0); - break; - case 0x84: - case 0x86: - xf_emit(ctx, 0x384, 0); - break; - case 0x92: - case 0x94: - case 0x96: - case 0x98: - xf_emit(ctx, 0x380, 0); - break; - } - nv50_graph_construct_gene_m2mf (ctx); - switch (dev_priv->chipset) { - case 0x50: - case 0x84: - case 0x86: - case 0x98: - xf_emit(ctx, 0x4c4, 0); - break; - case 0x92: - case 0x94: - case 0x96: - xf_emit(ctx, 0x984, 0); - break; - } - nv50_graph_construct_gene_unk5(ctx); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 0xa, 0); - else - xf_emit(ctx, 0xb, 0); - nv50_graph_construct_gene_unk4(ctx); - nv50_graph_construct_gene_unk3(ctx); + nv50_graph_construct_gene_dispatch(ctx); + 
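One wrinkle the rewritten xf_emit() above makes explicit: xfer values are interleaved across up to eight strands, so strand s begins at offset + s and each value emitted for it lands 8 dwords after the previous one. That is why the helper indexes with i << 3 and advances ctxvals_pos by num << 3, with nv_wo32() converting the slot number to a byte offset via the 4 * factor. A small illustration of the resulting layout, using a plain array in place of ctx->data:

	/* illustration only: how xf_emit()-style emission interleaves strands */
	static unsigned int image_sketch[0x1000];	/* stand-in for ctx->data */

	static void xf_emit_sketch(int *pos, int num, unsigned int val)
	{
		int i;

		for (i = 0; i < num; i++)
			image_sketch[*pos + (i << 3)] = val;	/* stride 8 */
		*pos += num << 3;
	}

	static void two_strands_sketch(int offset)
	{
		int pos;

		pos = offset + 1;			/* strand 1 */
		xf_emit_sketch(&pos, 3, 0x11);		/* slots +1, +9, +17 */
		pos = offset + 2;			/* strand 2 */
		xf_emit_sketch(&pos, 3, 0x22);		/* slots +2, +10, +18 */
	}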
nv50_graph_construct_gene_m2mf(ctx); + nv50_graph_construct_gene_unk24xx(ctx); + nv50_graph_construct_gene_clipid(ctx); + nv50_graph_construct_gene_zcull(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 1 */ ctx->ctxvals_pos = offset + 0x1; - nv50_graph_construct_gene_unk6(ctx); - nv50_graph_construct_gene_unk7(ctx); - nv50_graph_construct_gene_unk8(ctx); - switch (dev_priv->chipset) { - case 0x50: - case 0x92: - xf_emit(ctx, 0xfb, 0); - break; - case 0x84: - xf_emit(ctx, 0xd3, 0); - break; - case 0x94: - case 0x96: - xf_emit(ctx, 0xab, 0); - break; - case 0x86: - case 0x98: - xf_emit(ctx, 0x6b, 0); - break; - } - xf_emit(ctx, 2, 0x4e3bfdf); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 0xb, 0); - xf_emit(ctx, 2, 0x4e3bfdf); + nv50_graph_construct_gene_vfetch(ctx); + nv50_graph_construct_gene_eng2d(ctx); + nv50_graph_construct_gene_csched(ctx); + nv50_graph_construct_gene_ropm1(ctx); + nv50_graph_construct_gene_ropm2(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 2 */ ctx->ctxvals_pos = offset + 0x2; - switch (dev_priv->chipset) { - case 0x50: - case 0x92: - xf_emit(ctx, 0xa80, 0); - break; - case 0x84: - xf_emit(ctx, 0xa7e, 0); - break; - case 0x94: - case 0x96: - xf_emit(ctx, 0xa7c, 0); - break; - case 0x86: - case 0x98: - xf_emit(ctx, 0xa7a, 0); - break; - } - xf_emit(ctx, 1, 0x3fffff); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x1fff); - xf_emit(ctx, 0xe, 0); - nv50_graph_construct_gene_unk9(ctx); - nv50_graph_construct_gene_unk2(ctx); - nv50_graph_construct_gene_unk1(ctx); - nv50_graph_construct_gene_unk10(ctx); + nv50_graph_construct_gene_ccache(ctx); + nv50_graph_construct_gene_unk1cxx(ctx); + nv50_graph_construct_gene_strmout(ctx); + nv50_graph_construct_gene_unk14xx(ctx); + nv50_graph_construct_gene_unk10xx(ctx); + nv50_graph_construct_gene_unk34xx(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; @@ -1150,86 +1225,46 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) } else { /* Strand 0 */ ctx->ctxvals_pos = offset; - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x385, 0); - else - xf_emit(ctx, 0x384, 0); + nv50_graph_construct_gene_dispatch(ctx); nv50_graph_construct_gene_m2mf(ctx); - xf_emit(ctx, 0x950, 0); - nv50_graph_construct_gene_unk10(ctx); - xf_emit(ctx, 1, 0x0fac6881); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 1, 1); - xf_emit(ctx, 3, 0); - } - nv50_graph_construct_gene_unk8(ctx); - if (dev_priv->chipset == 0xa0) - xf_emit(ctx, 0x189, 0); - else if (dev_priv->chipset == 0xa3) - xf_emit(ctx, 0xd5, 0); - else if (dev_priv->chipset == 0xa5) - xf_emit(ctx, 0x99, 0); - else if (dev_priv->chipset == 0xaa) - xf_emit(ctx, 0x65, 0); - else - xf_emit(ctx, 0x6d, 0); - nv50_graph_construct_gene_unk9(ctx); + nv50_graph_construct_gene_unk34xx(ctx); + nv50_graph_construct_gene_csched(ctx); + nv50_graph_construct_gene_unk1cxx(ctx); + nv50_graph_construct_gene_strmout(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 1 */ ctx->ctxvals_pos = offset + 1; - nv50_graph_construct_gene_unk1(ctx); + nv50_graph_construct_gene_unk10xx(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 2 */ ctx->ctxvals_pos = offset + 2; - if (dev_priv->chipset == 0xa0) { - nv50_graph_construct_gene_unk2(ctx); - } - xf_emit(ctx, 0x36, 0); - nv50_graph_construct_gene_unk5(ctx); + if (dev_priv->chipset == 0xa0) + 
nv50_graph_construct_gene_unk14xx(ctx); + nv50_graph_construct_gene_unk24xx(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 3 */ ctx->ctxvals_pos = offset + 3; - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - nv50_graph_construct_gene_unk6(ctx); + nv50_graph_construct_gene_vfetch(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 4 */ ctx->ctxvals_pos = offset + 4; - if (dev_priv->chipset == 0xa0) - xf_emit(ctx, 0xa80, 0); - else if (dev_priv->chipset == 0xa3) - xf_emit(ctx, 0xa7c, 0); - else - xf_emit(ctx, 0xa7a, 0); - xf_emit(ctx, 1, 0x3fffff); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x1fff); + nv50_graph_construct_gene_ccache(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 5 */ ctx->ctxvals_pos = offset + 5; - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 0xb, 0); - xf_emit(ctx, 2, 0x4e3bfdf); - xf_emit(ctx, 3, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 0x4e3bfdf); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0); + nv50_graph_construct_gene_ropm2(ctx); + nv50_graph_construct_gene_ropm1(ctx); + /* per-ROP context */ for (i = 0; i < 8; i++) if (units & (1<<(i+16))) nv50_graph_construct_gene_ropc(ctx); @@ -1238,10 +1273,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) /* Strand 6 */ ctx->ctxvals_pos = offset + 6; - nv50_graph_construct_gene_unk3(ctx); - xf_emit(ctx, 0xb, 0); - nv50_graph_construct_gene_unk4(ctx); - nv50_graph_construct_gene_unk7(ctx); + nv50_graph_construct_gene_zcull(ctx); + nv50_graph_construct_gene_clipid(ctx); + nv50_graph_construct_gene_eng2d(ctx); if (units & (1 << 0)) nv50_graph_construct_xfer_tp(ctx); if (units & (1 << 1)) @@ -1269,7 +1303,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) if (units & (1 << 9)) nv50_graph_construct_xfer_tp(ctx); } else { - nv50_graph_construct_gene_unk2(ctx); + nv50_graph_construct_gene_unk14xx(ctx); } if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; @@ -1289,10 +1323,71 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) * non-trivial demagiced parts of ctx init go here */ +static void +nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx) +{ + /* start of strand 0 */ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* SEEK */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 5, 0); + else if (!IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 6, 0); + else + xf_emit(ctx, 4, 0); + /* SEEK */ + /* the PGRAPH's internal FIFO */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 8*3, 0); + else + xf_emit(ctx, 0x100*3, 0); + /* and another bonus slot?!? */ + xf_emit(ctx, 3, 0); + /* and YET ANOTHER bonus slot? */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 3, 0); + /* SEEK */ + /* CTX_SWITCH: caches of gr objects bound to subchannels. 
8 values, last used index */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + xf_emit(ctx, 9, 0); + /* SEEK */ + if (dev_priv->chipset < 0x90) + xf_emit(ctx, 4, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 6*2, 0); + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 6*2, 0); + xf_emit(ctx, 2, 0); + /* SEEK */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 0x1c, 0); + else if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x1e, 0); + else + xf_emit(ctx, 0x22, 0); + /* SEEK */ + xf_emit(ctx, 0x15, 0); +} + static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) { - /* m2mf state */ + /* Strand 0, right after dispatch */ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int smallm2mf = 0; + if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98) + smallm2mf = 1; + /* SEEK */ xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */ xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */ @@ -1319,427 +1414,975 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */ xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */ xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */ + /* SEEK */ + if (smallm2mf) + xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */ + else + xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */ + xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */ + /* SEEK */ + if (smallm2mf) + xf_emit(ctx, 0x400, 0); /* ffffffff */ + else + xf_emit(ctx, 0x800, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */ + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* 20 * bits ffffffff, 3ffff */ + xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */ } static void -nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx) +nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 2, 0); /* RO */ + xf_emit(ctx, 0x800, 0); /* ffffffff */ + switch (dev_priv->chipset) { + case 0x50: + case 0x92: + case 0xa0: + xf_emit(ctx, 0x2b, 0); + break; + case 0x84: + xf_emit(ctx, 0x29, 0); + break; + case 0x94: + case 0x96: + case 0xa3: + xf_emit(ctx, 0x27, 0); + break; + case 0x86: + case 0x98: + case 0xa5: + case 0xa8: + case 0xaa: + case 0xac: + case 0xaf: + xf_emit(ctx, 0x25, 0); + break; + } + /* CB bindings, 0x80 of them. first word is address >> 8, second is + * size >> 4 | valid << 24 */ + xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */ + xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */ + xf_emit(ctx, 1, 0); /* 0 */ + xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */ + xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */ + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0x100, 0); /* ffffffff */ + xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... 
*/ + xf_emit(ctx, 8, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 3 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */ + xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ + xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ + xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */ + xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */ + xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */ +} + +static void +nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i; /* end of area 2 on pre-NVA0, area 1 on NVAx */ - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 1, 0x3ff); else - xf_emit(ctx, 1, 0x7ff); - switch (dev_priv->chipset) { - case 0x50: - case 0x86: - case 0x98: - case 0xaa: - case 0xac: - xf_emit(ctx, 0x542, 0); - break; - case 0x84: - case 0x92: - case 0x94: - case 0x96: - xf_emit(ctx, 0x942, 0); - break; - case 0xa0: - case 0xa3: - xf_emit(ctx, 0x2042, 0); - break; - case 0xa5: - case 0xa8: - xf_emit(ctx, 0x842, 0); - break; + xf_emit(ctx, 1, 0x7ff); /* 000007ff */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + for (i = 0; i < 8; i++) { + switch (dev_priv->chipset) { + case 0x50: + case 0x86: + case 0x98: + case 0xaa: + case 0xac: + xf_emit(ctx, 0xa0, 0); /* ffffffff */ + break; + case 0x84: + case 0x92: + case 0x94: + case 0x96: + xf_emit(ctx, 0x120, 0); + break; + case 0xa5: + case 0xa8: + xf_emit(ctx, 0x100, 0); /* ffffffff */ + break; + case 0xa0: + case 0xa3: + case 0xaf: + xf_emit(ctx, 0x400, 0); /* ffffffff */ + break; + } + xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */ + xf_emit(ctx, 4, 0); /* ffffffff */ } - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x27); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x26); - xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 
000000ff GP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ } static void -nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx) +nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx) { + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; /* end of area 2 on pre-NVA0, area 1 on NVAx */ - xf_emit(ctx, 0x10, 0x04000000); - xf_emit(ctx, 0x24, 0); - xf_emit(ctx, 2, 0x04e3bfdf); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x1fe21); + xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ + xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */ + xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */ + xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */ + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0x0fac6881); + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 1); + xf_emit(ctx, 3, 0); + } } static void -nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx) +nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */ if (dev_priv->chipset != 0x50) { - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x804); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 5, 0); /* ffffffff */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 2, 4); /* 7f, ff */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ } - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x10); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 3, 0); - else - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x804); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x1a); + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */ if (dev_priv->chipset != 0x50) - xf_emit(ctx, 1, 0x7f); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 6, 0); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0x3ff); - else - xf_emit(ctx, 1, 0x7ff); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 0x38, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 
0x38, 0); - xf_emit(ctx, 2, 0x88); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 0x16, 0); - xf_emit(ctx, 1, 0x26); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x3f800000); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 4, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x1a); - xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 1, 0); /* 3ff */ + xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ + xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ if (dev_priv->chipset != 0x50) - xf_emit(ctx, 0x28, 0); - else - xf_emit(ctx, 0x25, 0); - xf_emit(ctx, 1, 0x52); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x26); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x1a); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x00ffff00); - xf_emit(ctx, 1, 0); -} - -static void -nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */ - xf_emit(ctx, 1, 0x3f); - xf_emit(ctx, 0xa, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 0x04000000); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 0000000f */ if (dev_priv->chipset == 0x50) - xf_emit(ctx, 0x10, 0); + xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ else - xf_emit(ctx, 0x11, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x1001); - xf_emit(ctx, 4, 0xffff); - xf_emit(ctx, 0x20, 0); - xf_emit(ctx, 0x10, 0x3f800000); - xf_emit(ctx, 1, 0x10); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0); - else - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 3); - xf_emit(ctx, 2, 0); -} - -static void -nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx) -{ - /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */ - xf_emit(ctx, 2, 0x04000000); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 0); -} - -static void -nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */ - xf_emit(ctx, 2, 4); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x1c4d, 0); - else - xf_emit(ctx, 0x1c4b, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0x8100c12); - if (dev_priv->chipset != 0x50) - xf_emit(ctx, 1, 3); - xf_emit(ctx, 1, 0); - 
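Worth pulling out from nv50_graph_construct_gene_ccache() above: its comment documents the constant-buffer binding encoding (0x80 two-word entries, hence the xf_emit(ctx, 0x100, 0) that reserves them). Assuming exactly the layout that comment gives, one binding would be packed like this (illustrative helper, not part of the patch):

	/* pack one CB_DEF entry per the gene_ccache comment:
	 * word 0 = address >> 8, word 1 = size >> 4 | valid << 24 */
	static void pack_cb_def_sketch(unsigned long addr, unsigned int size,
				       int valid, unsigned int out[2])
	{
		out[0] = addr >> 8;	/* address in 256-byte units */
		out[1] = (size >> 4) | ((unsigned int)(valid != 0) << 24);
	}

That is, addresses are carried in 256-byte units and sizes in 16-byte units, with bit 24 of the second word flagging the slot valid.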
xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0x80c14); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 1, 0x27); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x3c1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x16, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 1, 0); -} - -static void -nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */ - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 8, 0); - else - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x20); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x11, 0); - else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 0xf, 0); - else - xf_emit(ctx, 0xe, 0); - xf_emit(ctx, 1, 0x1a); - xf_emit(ctx, 0xd, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 8); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0x3ff); - else - xf_emit(ctx, 1, 0x7ff); - if (dev_priv->chipset == 0xa8) - xf_emit(ctx, 1, 0x1e00); - xf_emit(ctx, 0xc, 0); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 0x125, 0); - else if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0x126, 0); - else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) - xf_emit(ctx, 0x124, 0); - else - xf_emit(ctx, 0x1f7, 0); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 3, 0); - else - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0xa1, 0); - else - xf_emit(ctx, 0x5a, 0); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0x834, 0); - else if (dev_priv->chipset == 0xa0) - xf_emit(ctx, 0x1873, 0); - else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x8ba, 0); - else - xf_emit(ctx, 0x833, 0); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 0xf, 0); -} - -static void -nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */ - xf_emit(ctx, 2, 0); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 2, 1); - else - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0x100); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 8); - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 3, 1); - xf_emit(ctx, 1, 0xcf); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 3, 1); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x15); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x4444480); - xf_emit(ctx, 0x37, 0); -} - -static void -nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx) -{ - /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */ - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x100); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x10001); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x10001); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x10001); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 2); -} 
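A note on the strand walk in nv50_graph_construct_xfer1() further up: every strand starts at offset + strand_index and grows with the stride-8 interleave shown earlier, and after each strand the code recomputes (ctx->ctxvals_pos - offset) / 8 and keeps the maximum, which becomes the size of the whole xfer area. Condensed into a sketch (the gene constructors replaced by a fake advance):

	/* sketch of xfer1's per-strand size bookkeeping */
	static int measure_strands_sketch(int offset)
	{
		int size = 0, pos, s;

		for (s = 0; s < 8; s++) {
			pos = offset + s;		/* strand s starts here */
			pos += ((s + 1) * 4) << 3;	/* fake xf_emit advances */
			if ((pos - offset) / 8 > size)
				size = (pos - offset) / 8;
		}
		return size;	/* longest strand, in 8-dword groups (32 here) */
	}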
- -static void -nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */ - xf_emit(ctx, 1, 0x3f800000); - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0x1a); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x12, 0); - xf_emit(ctx, 1, 0x00ffff00); - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 0xf, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 3); - else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 0x04000000); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 5); - xf_emit(ctx, 1, 0x52); - if (dev_priv->chipset == 0x50) { - xf_emit(ctx, 0x13, 0); - } else { - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x11, 0); - else - xf_emit(ctx, 0x10, 0); - } - xf_emit(ctx, 0x10, 0x3f800000); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 0x26, 0); - xf_emit(ctx, 1, 0x8100c12); - xf_emit(ctx, 1, 5); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 4, 0xffff); - if (dev_priv->chipset != 0x50) - xf_emit(ctx, 1, 3); - if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0x1f, 0); - else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0xc, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x00ffff00); - xf_emit(ctx, 1, 0x1a); + xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */ + xf_emit(ctx, 3, 0); /* f, 0, 0 */ + xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? 
*/ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */ + xf_emit(ctx, 3, 0); /* f, 0, 0 */ + xf_emit(ctx, 3, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ if (dev_priv->chipset != 0x50) { - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 3); + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } + xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */ + xf_emit(ctx, 1, 0); /* f */ + xf_emit(ctx, 1, 0); /* 0? 
*/ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 003fffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* 0000000f */ +} + +static void +nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 1, 0); /* 0000ffff */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ + /* SEEK */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ + xf_emit(ctx, 1, 3); /* 
00000003 FP_CTRL_UNK196C */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */ +} + +static void +nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx) +{ + /* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */ + /* SEEK */ + xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */ + xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */ + xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */ + xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */ + xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */ + xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */ +} + +static void +nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i; + /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */ + /* SEEK */ + xf_emit(ctx, 0x33, 0); + /* SEEK */ + xf_emit(ctx, 2, 0); + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ + + xf_emit(ctx, 4, 0); /* RO */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ } - if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0x26, 0); else - xf_emit(ctx, 0x3c, 0); - xf_emit(ctx, 1, 0x102); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 4, 4); + { + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? */ + + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ + xf_emit(ctx, 1, 0); /* 1ff */ + xf_emit(ctx, 8, 0); /* 0? 
*/ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 1); /* 00000001 */ + /* SEEK */ if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 8, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0x3ff); + xf_emit(ctx, 2, 4); /* 000000ff */ + xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 1); /* 00000001 */ + for (i = 0; i < 10; i++) { + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* ffffffff */ + xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... */ + xf_emit(ctx, 0x10, 0); /* ffffffff */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */ + xf_emit(ctx, 1, 1); /* 00000001 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ + xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 000003ff */ +} + +static void +nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int acnt = 0x10, rep, i; + /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */ + if (IS_NVA3F(dev_priv->chipset)) + acnt = 0x20; + /* SEEK */ + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */ + xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */ + } + xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */ + xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */ + xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */ + xf_emit(ctx, 1, 0); /* 000000ff turing UNK370???
*/ + xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0xb, 0); /* RO */ + else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 0x9, 0); /* RO */ else - xf_emit(ctx, 1, 0x7ff); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x102); - xf_emit(ctx, 9, 0); - xf_emit(ctx, 4, 4); - xf_emit(ctx, 0x2c, 0); + xf_emit(ctx, 0x8, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 7f/ff */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */ + xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */ + else + xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */ + if (dev_priv->chipset == 0xa8) + xf_emit(ctx, 1, 0x1e00); /* 7fff */ + /* SEEK */ + xf_emit(ctx, 0xc, 0); /* RO or close */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0) + xf_emit(ctx, 2, 0); /* ffffffff */ + else + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 0x10, 0); /* 0? */ + xf_emit(ctx, 2, 0); /* weird... */ + xf_emit(ctx, 2, 0); /* RO */ + } else { + xf_emit(ctx, 8, 0); /* 0? */ + xf_emit(ctx, 1, 0); /* weird... */ + xf_emit(ctx, 2, 0); /* RO */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */ + xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */ + xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */ + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ + xf_emit(ctx, 1, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ + xf_emit(ctx, 1, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* RO */ + xf_emit(ctx, 2, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? 
*/ + xf_emit(ctx, 1, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */ + xf_emit(ctx, 3, 0); /* f/1f */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, acnt, 0); /* f */ + xf_emit(ctx, 3, 0); /* f/1f */ + } + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 2, 0); /* RO */ + else + xf_emit(ctx, 5, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */ + /* SEEK */ + if (dev_priv->chipset < 0xa0) { + xf_emit(ctx, 0x41, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0x11, 0); /* RO */ + } else if (!IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x50, 0); /* RO */ + else + xf_emit(ctx, 0x58, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, 1, 1); /* 1 UNK0DEC */ + /* SEEK */ + xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */ + xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x1d, 0); /* RO */ + else + xf_emit(ctx, 0x16, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + /* SEEK */ + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 8, 0); /* RO */ + else if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0xc, 0); /* RO */ + else + xf_emit(ctx, 7, 0); /* RO */ + /* SEEK */ + xf_emit(ctx, 0xa, 0); /* RO */ + if (dev_priv->chipset == 0xa0) + rep = 0xc; + else + rep = 4; + for (i = 0; i < rep; i++) { + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x20, 0); /* ffffffff */ + xf_emit(ctx, 0x200, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */ + xf_emit(ctx, 4, 0); /* ffffffff */ + } + /* SEEK */ + xf_emit(ctx, 1, 0); /* 113/111 */ + xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ + xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */ + xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 7, 0); /* weird... */ + else + xf_emit(ctx, 5, 0); /* weird... */ +} + +static void +nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */ + /* SEEK */ + xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */ + xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */ + xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */ + if (dev_priv->chipset < 0xa0) { + /* this is useless on everything but the original NV50, + * guess they forgot to nuke it. Or just didn't bother. 
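(the slots
+ * still exist in every pre-NVA0 context image, hence the chipset < 0xa0
+ * guard instead of == 0x50.)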
*/ + xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */ + xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */ + xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */ + } + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */ + xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */ + xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */ + xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */ + xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */ + xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ + xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */ + xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */ + xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */ + xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */ + xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */ + xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */ + xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */ + xf_emit(ctx, 1, 0); /* 00000001 UNK888 */ + xf_emit(ctx, 1, 4); /* 0000003f UNK884 */ + xf_emit(ctx, 1, 0); /* 00000007 UNK880 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */ + xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */ + xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK260 */ + xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */ + /* SEEK */ + xf_emit(ctx, 0x10, 0); + /* SEEK */ + xf_emit(ctx, 0x27, 0); +} + +static void +nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */ + /* SEEK */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? 
*/ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* 000003ff */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */ + xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */ + xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */ + xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */ + xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */ + xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */ + xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ + xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ + xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ + xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ + xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + /* SEEK */ + xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */ + switch (dev_priv->chipset) { + case 0x50: + case 0x92: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x80, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */ + break; + case 0x84: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x60, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */ + break; + case 0x94: + case 0x96: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x40, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */ + break; + case 0x86: + case 0x98: + xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */ + xf_emit(ctx, 0x10, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */ + break; + case 0xa0: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0xf0, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */ + break; + case 0xa3: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ + xf_emit(ctx, 0x60, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */ + break; + case 0xa5: + case 0xaf: + xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... 
*/ + xf_emit(ctx, 0x30, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */ + break; + case 0xaa: + xf_emit(ctx, 0x12, 0); + break; + case 0xa8: + case 0xac: + xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */ + xf_emit(ctx, 0x10, 0); /* fff */ + xf_emit(ctx, 2, 0); /* ff, fff */ + xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */ + break; + } + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000000 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 0000001f */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 4, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 000000ff */ +} + +static void +nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ + xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */ + xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ + xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */ + xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ + xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */ + else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */ + xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */ + xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */ + if (dev_priv->chipset != 0x50) { + xf_emit(ctx, 1, 0); /* 3ff */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */ + } + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + 
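/* 0x3f800000 is IEEE-754 1.0f, so together with the zeroes above each of
+ * the 0x10 viewports defaults to a [0.0, 1.0] depth range */
+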
xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x1c, 0); /* RO */ + else if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x9, 0); + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ + xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ + if (dev_priv->chipset != 0x50) { + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ + xf_emit(ctx, 1, 0); /* 3ff */ + } + /* XXX: the following block could belong either to unk1cxx, or + * to STRMOUT. Rather hard to tell. */ + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x25, 0); + else + xf_emit(ctx, 0x3b, 0); +} + +static void +nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ + xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ + xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ + } + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ + else + xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ + xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */ + xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */ + xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ + xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ + } + xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */ + xf_emit(ctx, 2, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + /* SEEK */ + xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000000? 
*/ + xf_emit(ctx, 2, 0); /* ffffffff */ +} + +static void +nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ +} + +static void +nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* SEEK */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 2, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 7 */ + /* SEEK */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ + xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ + xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ } static void @@ -1749,443 +2392,709 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx) int magic2; if (dev_priv->chipset == 0x50) { magic2 = 0x00003e60; - } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + } else if (!IS_NVA3F(dev_priv->chipset)) { magic2 = 0x001ffe67; } else { magic2 = 0x00087e67; } - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 4, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 7, 0); - if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 0x15); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0); /* f/7 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset)) + xf_emit(ctx, 1, 0x15); /* 000000ff */ +
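/* NVA3F covers the NVA3/NVA5/NVA8/NVAF chipsets and NVAAF the NVAA/NVAC
+ * IGPs, per the IS_NVA3F()/IS_NVAAF() helpers this patch presumably adds
+ * earlier in the file. */
+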
xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) { - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0x400); - xf_emit(ctx, 1, 0x300); - xf_emit(ctx, 1, 0x1001); + xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */ + xf_emit(ctx, 1, 4); /* 7 */ + xf_emit(ctx, 1, 0x400); /* fffffff */ + xf_emit(ctx, 1, 0x300); /* ffff */ + xf_emit(ctx, 1, 0x1001); /* 1fff */ if (dev_priv->chipset != 0xa0) { - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 0); + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */ else - xf_emit(ctx, 1, 0x15); + xf_emit(ctx, 1, 0x15); /* ff */ } - xf_emit(ctx, 3, 0); } - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x13, 0); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 0x10, 0); - xf_emit(ctx, 0x10, 0x3f800000); - xf_emit(ctx, 0x19, 0); - xf_emit(ctx, 1, 0x10); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x3f); - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0x10); 
/* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ + xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ + xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ if (dev_priv->chipset >= 0xa0) { xf_emit(ctx, 2, 0); xf_emit(ctx, 1, 0x1001); xf_emit(ctx, 0xb, 0); } else { - xf_emit(ctx, 0xc, 0); + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ } - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x11); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 4, 0); - else - xf_emit(ctx, 6, 0); - xf_emit(ctx, 3, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x0fac6881); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 1, 0); - xf_emit(ctx, 0x18, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x16, 0); + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x11); /* 3f/7f */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + if (dev_priv->chipset != 0x50) { + xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ + xf_emit(ctx, 1, 0); /* 000000ff */ + } + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 1, 0); 
/* ff/3ff */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 0000000f */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } else if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 2, 0); /* 00000001 */ } else { - if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 0x1b, 0); - else - xf_emit(ctx, 0x15, 0); + xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ } - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); + xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */ + xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */ + xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */ if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 4, 0); - else - xf_emit(ctx, 3, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 0x10, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 0x10, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 3, 0); + xf_emit(ctx, 2, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? 
*/ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */ + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ } - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x5b, 0); + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */ + xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */ + xf_emit(ctx, 1, 0); /* 000000ff ROP */ + xf_emit(ctx, 1, 0); /* ffffffff BETA1 */ + xf_emit(ctx, 1, 0); /* ffffffff BETA4 */ + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */ } static void -nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx) +nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; int magic3; - if (dev_priv->chipset == 0x50) + switch (dev_priv->chipset) { + case 0x50: magic3 = 0x1000; - else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) + break; + case 0x86: + case 0x98: + case 0xa8: + case 0xaa: + case 0xac: + case 0xaf: magic3 = 0x1e00; - else + break; + default: magic3 = 0; - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 4); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0x24, 0); + } + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x1f, 0); /* ffffffff */ else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 0x14, 0); + xf_emit(ctx, 0x0f, 0); /* ffffffff */ else - xf_emit(ctx, 0x15, 0); - xf_emit(ctx, 2, 4); + xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */ + xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 0x03020100); + xf_emit(ctx, 1, 0x03020100); /* ffffffff */ else - xf_emit(ctx, 1, 0x00608080); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 2, 4); - xf_emit(ctx, 1, 0x80); + xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ if (magic3) - xf_emit(ctx, 1, magic3); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 0x24, 0); - xf_emit(ctx, 1, 
4); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0x03020100); - xf_emit(ctx, 1, 3); + xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */ + xf_emit(ctx, 1, 0); /* 0000001f */ + xf_emit(ctx, 1, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ if (magic3) - xf_emit(ctx, 1, magic3); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 3); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */ + xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + xf_emit(ctx, 1, 0); /* 111/113 */ if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96) - xf_emit(ctx, 0x1024, 0); + xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ else if (dev_priv->chipset < 0xa0) - xf_emit(ctx, 0xa24, 0); - else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) - xf_emit(ctx, 0x214, 0); + xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ + else if (!IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x210, 0); /* ffffffff */ else - xf_emit(ctx, 0x414, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 3); - xf_emit(ctx, 2, 0); + xf_emit(ctx, 0x410, 0); /* ffffffff */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ + xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ + xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ } static void -nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx) +nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; int magic1, magic2; if (dev_priv->chipset == 0x50) { magic1 = 0x3ff; magic2 = 0x00003e60; - } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + } else if (!IS_NVA3F(dev_priv->chipset)) { magic1 = 0x7ff; magic2 = 0x001ffe67; } else { magic1 = 0x7ff; magic2 = 0x00087e67; } - xf_emit(ctx, 3, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0xc, 0); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 0xb, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 4, 0xffff); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - 
xf_emit(ctx, 1, 3); - xf_emit(ctx, 1, 0); - } else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0xa, 0); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 1, 0); - xf_emit(ctx, 0x18, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 1, 0); - } - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 3, 0xcf); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0xa, 0); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x11); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 2, 1); - else - xf_emit(ctx, 1, 1); - if(dev_priv->chipset == 0x50) - xf_emit(ctx, 1, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 5, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, magic1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 0x28, 0); - xf_emit(ctx, 8, 8); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 8, 0x400); - xf_emit(ctx, 8, 0x300); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x20); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0x100); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x40); - xf_emit(ctx, 1, 0x100); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 3); - xf_emit(ctx, 4, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 9, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x400); - xf_emit(ctx, 1, 0x300); - xf_emit(ctx, 1, 0x1001); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 4, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 1, 0xf); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 0x15, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 3, 0); - } else - xf_emit(ctx, 0x17, 0); - if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 0x0fac6881); - xf_emit(ctx, 1, magic2); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 3, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 2, 1); - else - 
xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 2, 0); - else if (dev_priv->chipset != 0x50) - xf_emit(ctx, 1, 0); -} - -static void -nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 2, 0); - else - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0x2a712488); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x4085c000); - xf_emit(ctx, 1, 0x40); - xf_emit(ctx, 1, 0x100); - xf_emit(ctx, 1, 0x10100); - xf_emit(ctx, 1, 0x02800000); -} - -static void -nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - xf_emit(ctx, 2, 0x04e3bfdf); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x00ffff00); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 2, 1); - else - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 0x00ffff00); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0x30201000); - xf_emit(ctx, 1, 0x70605040); - xf_emit(ctx, 1, 0xb8a89888); - xf_emit(ctx, 1, 0xf8e8d8c8); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x1a); -} - -static void -nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx) -{ - struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 0xfac6881); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 2, 0); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 0xb, 0); - else - xf_emit(ctx, 0xa, 0); - xf_emit(ctx, 8, 1); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0xfac6881); - xf_emit(ctx, 1, 0xf); - xf_emit(ctx, 7, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 1); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 6, 0); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 6, 0); + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */ + xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ + xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 00000001 
SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */ + xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */ + xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */ + xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */ + } else if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000003 */ } else { - xf_emit(ctx, 0xb, 0); + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ } + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + } + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */ + xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0xf); /* 0000000f 
COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0); /* ff */ + else + xf_emit(ctx, 3, 0); /* 1, 7, 3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */ + xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */ + xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */ + xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ + xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */ + xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */ + xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */ + xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */ + xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ + xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */ + xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */ +
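/* i.e. the default destination surface is 256x256 (0x100) with a
+ * 64-byte (0x40) pitch */
+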
xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */ + xf_emit(ctx, 1, 0); /* 0000ffff */ + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 2, 0); /* ffff, ff/3ff */ + xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */ + xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */ + xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */ + xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */ + xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 0); /* 00000001 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ + xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 0); /* 7 */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + } + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0x0fac6881); /* fffffff */ + xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ + xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ + xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ + if 
(IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */ + } + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */ + xf_emit(ctx, 1, 0xfac6881); /* fffffff */ + xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */ + xf_emit(ctx, 1, 4); /* 7 */ + xf_emit(ctx, 1, 0); /* 1 */ + xf_emit(ctx, 2, 1); /* 1 */ + xf_emit(ctx, 2, 0); /* 7, f */ + xf_emit(ctx, 1, 1); /* 1 */ + xf_emit(ctx, 1, 0); /* 7/f */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 0x9, 0); /* 1 */ + else + xf_emit(ctx, 0x8, 0); /* 1 */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 8, 1); /* 1 */ + xf_emit(ctx, 1, 0x11); /* 7f */ + xf_emit(ctx, 7, 0); /* 7f */ + xf_emit(ctx, 1, 0xfac6881); /* fffffff */ + xf_emit(ctx, 1, 0xf); /* f */ + xf_emit(ctx, 7, 0); /* f */ + xf_emit(ctx, 1, 0x11); /* 7f */ + xf_emit(ctx, 1, 1); /* 1 */ + xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + } + } +} + +static void +nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */ + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); /* 3 */ + xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */ + xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */ + xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */ + xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */ + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */ + else + xf_emit(ctx, 2, 0); /* 3ff, 1 */ + xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */ + xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */ + xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */ + xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */ + xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */ + xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */ + xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */ + xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */ + if (dev_priv->chipset == 0x50) { + xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ + xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */ + xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */ + } else if (!IS_NVAAF(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? 
*/ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */ + xf_emit(ctx, 1, 0); /* 00000003 */ + xf_emit(ctx, 1, 0); /* 000003ff */ + } else { + xf_emit(ctx, 0x6, 0); + } + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */ + xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */ +} + +static void +nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ + xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ + xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ + xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ + xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ + xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ + xf_emit(ctx, 1, 0); /* ffff0ff3 */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ + xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ + xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ + xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ + xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */ + xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ + xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ } static void @@ -2193,108 +3102,136 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; if (dev_priv->chipset < 0xa0) { - nv50_graph_construct_xfer_tp_x1(ctx); - nv50_graph_construct_xfer_tp_x2(ctx); - nv50_graph_construct_xfer_tp_x3(ctx); - if (dev_priv->chipset == 0x50) - xf_emit(ctx, 0xf, 0); - else - xf_emit(ctx, 0x12, 0); - nv50_graph_construct_xfer_tp_x4(ctx); + nv50_graph_construct_xfer_unk84xx(ctx); + nv50_graph_construct_xfer_tprop(ctx); + nv50_graph_construct_xfer_tex(ctx); + nv50_graph_construct_xfer_unk8cxx(ctx); } else { - nv50_graph_construct_xfer_tp_x3(ctx); - if (dev_priv->chipset < 0xaa) - xf_emit(ctx, 0xc, 0); - else - xf_emit(ctx, 0xa, 0); - nv50_graph_construct_xfer_tp_x2(ctx); - nv50_graph_construct_xfer_tp_x5(ctx); - nv50_graph_construct_xfer_tp_x4(ctx); - nv50_graph_construct_xfer_tp_x1(ctx); + nv50_graph_construct_xfer_tex(ctx); + nv50_graph_construct_xfer_tprop(ctx); + nv50_graph_construct_xfer_unk8cxx(ctx); + nv50_graph_construct_xfer_unk84xx(ctx); } } static void -nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx) +nv50_graph_construct_xfer_mpc(struct 
nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; - int i, mpcnt; - if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) - mpcnt = 1; - else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8) - mpcnt = 2; - else - mpcnt = 3; + int i, mpcnt = 2; + switch (dev_priv->chipset) { + case 0x98: + case 0xaa: + mpcnt = 1; + break; + case 0x50: + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0xa8: + case 0xac: + mpcnt = 2; + break; + case 0xa0: + case 0xa3: + case 0xa5: + case 0xaf: + mpcnt = 3; + break; + } for (i = 0; i < mpcnt; i++) { - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x80); - xf_emit(ctx, 1, 0x80007004); - xf_emit(ctx, 1, 0x04000400); + xf_emit(ctx, 1, 0); /* ff */ + xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */ + xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */ + xf_emit(ctx, 1, 0x04000400); /* ffffffff */ if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 1, 0xc0); - xf_emit(ctx, 1, 0x1000); - xf_emit(ctx, 2, 0); - if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) { - xf_emit(ctx, 1, 0xe00); - xf_emit(ctx, 1, 0x1e00); + xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */ + xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ + if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) { + xf_emit(ctx, 1, 0xe00); /* 7fff */ + xf_emit(ctx, 1, 0x1e00); /* 7fff */ } - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ if (dev_priv->chipset == 0x50) - xf_emit(ctx, 2, 0x1000); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 2); - if (dev_priv->chipset >= 0xaa) - xf_emit(ctx, 0xb, 0); + xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */ + xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ + xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + if (IS_NVAAF(dev_priv->chipset)) + xf_emit(ctx, 0xb, 0); /* RO */ else if (dev_priv->chipset >= 0xa0) - xf_emit(ctx, 0xc, 0); + xf_emit(ctx, 0xc, 0); /* RO */ else - xf_emit(ctx, 0xa, 0); + xf_emit(ctx, 0xa, 0); /* RO */ } - xf_emit(ctx, 1, 0x08100c12); - xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 0); /* ff/3ff */ if (dev_priv->chipset >= 0xa0) { - xf_emit(ctx, 1, 0x1fe21); + xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */ } - xf_emit(ctx, 5, 0); - xf_emit(ctx, 4, 0xffff); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 2, 0x10001); - xf_emit(ctx, 1, 1); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 0x1fe21); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 1); - xf_emit(ctx, 4, 0); - xf_emit(ctx, 1, 0x08100c12); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 8, 0); - xf_emit(ctx, 1, 0xfac6881); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) - xf_emit(ctx, 1, 3); - xf_emit(ctx, 3, 0); - xf_emit(ctx, 1, 4); - xf_emit(ctx, 9, 0); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 2, 1); - xf_emit(ctx, 1, 2); - xf_emit(ctx, 3, 1); - xf_emit(ctx, 1, 0); - if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { - xf_emit(ctx, 8, 2); - 
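The mpcnt rewrite earlier in this hunk replaces the old open-coded range checks with an explicit switch over every known chipset, keeping 2 as the value unknown chipsets fall through with. Purely as an illustration of the same mapping (this is not how the driver does it), a table-driven equivalent would be:

	/* Hypothetical table-driven version of the chipset -> MP-count
	 * switch above; entries that would map to 2 are omitted since
	 * 2 is the fallback, exactly as in the switch. */
	static const struct { u8 chipset, mpcnt; } mp_counts[] = {
		{ 0x98, 1 }, { 0xaa, 1 },
		{ 0xa0, 3 }, { 0xa3, 3 }, { 0xa5, 3 }, { 0xaf, 3 },
	};

	static int nv50_mpcnt_sketch(u8 chipset)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(mp_counts); i++)
			if (mp_counts[i].chipset == chipset)
				return mp_counts[i].mpcnt;
		return 2;	/* 0x50/0x84/.../0xac and anything new */
	}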
xf_emit(ctx, 0x10, 1); - xf_emit(ctx, 8, 2); - xf_emit(ctx, 0x18, 1); - xf_emit(ctx, 3, 0); + xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */ + xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ + xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ + xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ + xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ + xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ + xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ + xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ + xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ + xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNK0FAC */ + xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ + xf_emit(ctx, 1, 0); /* ff/3ff */ + xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */ + xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */ + xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ + xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ + xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ + xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */ + xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ + xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ + xf_emit(ctx, 1, 0); /* 00000007 */ + xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */ + xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ + if (IS_NVA3F(dev_priv->chipset)) + xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ + xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ + xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ + xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */ + xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ + xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ + xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ + xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ + if (IS_NVA3F(dev_priv->chipset)) { + xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ + xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ + xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ + xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ + xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ } - xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ + xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ + /* XXX: demagic this part some day */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 0x3a0, 0); else if (dev_priv->chipset < 0x94) @@ -2303,9 +3240,9 @@ nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx) xf_emit(ctx, 0x39f, 0); else xf_emit(ctx, 0x3a3, 0); - xf_emit(ctx, 1, 0x11); - xf_emit(ctx, 1, 0); - xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ + xf_emit(ctx, 1, 0); /* 7 OPERATION */ + xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */ xf_emit(ctx, 0x2d, 0); } @@ -2323,52 +3260,56 @@ nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) if (dev_priv->chipset < 0xa0) { for (i = 0; i < 8; i++) { ctx->ctxvals_pos = offset + i; + /* that little bugger
belongs to csched. No idea + * what it's doing here. */ if (i == 0) - xf_emit(ctx, 1, 0x08100c12); + xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ if (units & (1 << i)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; } } else { /* Strand 0: TPs 0, 1 */ ctx->ctxvals_pos = offset; - xf_emit(ctx, 1, 0x08100c12); + /* that little bugger belongs to csched. No idea + * what it's doing here. */ + xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ if (units & (1 << 0)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 1)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; - /* Strand 0: TPs 2, 3 */ + /* Strand 1: TPs 2, 3 */ ctx->ctxvals_pos = offset + 1; if (units & (1 << 2)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 3)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; - /* Strand 0: TPs 4, 5, 6 */ + /* Strand 2: TPs 4, 5, 6 */ ctx->ctxvals_pos = offset + 2; if (units & (1 << 4)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 5)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 6)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; - /* Strand 0: TPs 7, 8, 9 */ + /* Strand 3: TPs 7, 8, 9 */ ctx->ctxvals_pos = offset + 3; if (units & (1 << 7)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 8)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 9)) - nv50_graph_construct_xfer_tp2(ctx); + nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 91ef93cf1f35..a53fc974332b 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -32,39 +32,87 @@ struct nv50_instmem_priv { uint32_t save1700[5]; /* 0x1700->0x1710 */ - struct nouveau_gpuobj_ref *pramin_pt; - struct nouveau_gpuobj_ref *pramin_bar; - struct nouveau_gpuobj_ref *fb_bar; + struct nouveau_gpuobj *pramin_pt; + struct nouveau_gpuobj *pramin_bar; + struct nouveau_gpuobj *fb_bar; }; -#define NV50_INSTMEM_PAGE_SHIFT 12 -#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) -#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) +static void +nv50_channel_del(struct nouveau_channel **pchan) +{ + struct nouveau_channel *chan; -/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN - */ -#define BAR0_WI32(g, o, v) do { \ - uint32_t offset; \ - if ((g)->im_backing) { \ - offset = (g)->im_backing_start; \ - } else { \ - offset = chan->ramin->gpuobj->im_backing_start; \ - offset += (g)->im_pramin->start; \ - } \ - offset += (o); \ - nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \ -} while (0) + chan = *pchan; + *pchan = NULL; + if (!chan) + return; + + nouveau_gpuobj_ref(NULL, &chan->ramfc); + nouveau_gpuobj_ref(NULL, &chan->vm_pd); + if (chan->ramin_heap.free_stack.next) + drm_mm_takedown(&chan->ramin_heap); + nouveau_gpuobj_ref(NULL, &chan->ramin); + 
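Throughout the instmem rewrite that starts here, nouveau_gpuobj_ref(NULL, &ptr) is the drop-and-clear idiom that supersedes the old nouveau_gpuobj_ref_del() calls. A generic sketch of the assumed reference-swap semantics follows; all names are hypothetical, and the real helper also unlinks the object from driver lists before freeing it:

	/* Assumed semantics: reference the new object (if any), release
	 * the one currently held in *ptr, store the new pointer.
	 * Passing NULL therefore just releases and clears the slot. */
	struct obj_sketch { int refcount; };

	static void
	obj_ref_sketch(struct obj_sketch *ref, struct obj_sketch **ptr)
	{
		struct obj_sketch *old = *ptr;

		if (ref)
			ref->refcount++;
		*ptr = ref;
		if (old && --old->refcount == 0)
			kfree(old);	/* assumes kzalloc()-allocated objects */
	}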
kfree(chan); +} + +static int +nv50_channel_new(struct drm_device *dev, u32 size, + struct nouveau_channel **pchan) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; + u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; + struct nouveau_channel *chan; + int ret; + + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->dev = dev; + + ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + + ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : + chan->ramin->pinst + pgd, + chan->ramin->vinst + pgd, + 0x4000, NVOBJ_FLAG_ZERO_ALLOC, + &chan->vm_pd); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : + chan->ramin->pinst + fc, + chan->ramin->vinst + fc, 0x100, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + + *pchan = chan; + return 0; +} int nv50_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan; - uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; - uint32_t save_nv001700; - uint64_t v; struct nv50_instmem_priv *priv; + struct nouveau_channel *chan; int ret, i; + u32 tmp; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -75,27 +123,61 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0x1700; i <= 0x1710; i += 4) priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); - /* Reserve the last MiB of VRAM, we should probably try to avoid - * setting up the below tables over the top of the VBIOS image at - * some point. - */ - dev_priv->ramin_rsvd_vram = 1 << 20; - c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; - c_size = 128 << 10; - c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; - c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; - c_base = c_vmpd + 0x4000; - pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size); + /* Global PRAMIN heap */ + ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); + if (ret) { + NV_ERROR(dev, "Failed to init RAMIN heap\n"); + return -ENOMEM; + } - NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset); - NV_DEBUG(dev, " VBIOS image: 0x%08x\n", - (nv_rd32(dev, 0x619f04) & ~0xff) << 8); - NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20); - NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10); + /* we need a channel to plug into the hw to control the BARs */ + ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]); + if (ret) + return ret; + chan = dev_priv->fifos[127] = dev_priv->fifos[0]; - /* Determine VM layout, we need to do this first to make sure - * we allocate enough memory for all the page tables. 
- */ + /* allocate page table for PRAMIN BAR */ + ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, + 0x1000, NVOBJ_FLAG_ZERO_ALLOC, + &priv->pramin_pt); + if (ret) + return ret; + + nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63); + nv_wo32(chan->vm_pd, 0x0004, 0); + + /* DMA object for PRAMIN BAR */ + ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar); + if (ret) + return ret; + nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000); + nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); + nv_wo32(priv->pramin_bar, 0x08, 0x00000000); + nv_wo32(priv->pramin_bar, 0x0c, 0x00000000); + nv_wo32(priv->pramin_bar, 0x10, 0x00000000); + nv_wo32(priv->pramin_bar, 0x14, 0x00000000); + + /* map channel into PRAMIN, gpuobj didn't do it for us */ + ret = nv50_instmem_bind(dev, chan->ramin); + if (ret) + return ret; + + /* poke regs... */ + nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); + nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); + nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4)); + + tmp = nv_ri32(dev, 0); + nv_wi32(dev, 0, ~tmp); + if (nv_ri32(dev, 0) != ~tmp) { + NV_ERROR(dev, "PRAMIN readback failed\n"); + return -EIO; + } + nv_wi32(dev, 0, tmp); + + dev_priv->ramin_available = true; + + /* Determine VM layout */ dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); dev_priv->vm_gart_size = NV50_VM_BLOCK; @@ -115,172 +197,41 @@ nv50_instmem_init(struct drm_device *dev) dev_priv->vm_vram_base, dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1); - c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8); - - /* Map BAR0 PRAMIN aperture over the memory we want to use */ - save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN); - nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16)); - - /* Create a fake channel, and use it as our "dummy" channels 0/127. - * The main reason for creating a channel is so we can use the gpuobj - * code. However, it's probably worth noting that NVIDIA also setup - * their channels 0/127 with the same values they configure here. - * So, there may be some other reason for doing this. - * - * Have to create the entire channel manually, as the real channel - * creation code assumes we have PRAMIN access, and we don't until - * we're done here. - */ - chan = kzalloc(sizeof(*chan), GFP_KERNEL); - if (!chan) - return -ENOMEM; - chan->id = 0; - chan->dev = dev; - chan->file_priv = (struct drm_file *)-2; - dev_priv->fifos[0] = dev_priv->fifos[127] = chan; - - INIT_LIST_HEAD(&chan->ramht_refs); - - /* Channel's PRAMIN object + heap */ - ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, - NULL, &chan->ramin); - if (ret) - return ret; - - if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base)) - return -ENOMEM; - - /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ - ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, - 0x4000, 0, NULL, &chan->ramfc); - if (ret) - return ret; - - for (i = 0; i < c_vmpd; i += 4) - BAR0_WI32(chan->ramin->gpuobj, i, 0); - - /* VM page directory */ - ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, - 0x4000, 0, &chan->vm_pd, NULL); - if (ret) - return ret; - for (i = 0; i < 0x4000; i += 8) { - BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); - BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); - } - - /* PRAMIN page table, cheat and map into VM at 0x0000000000. 
- * We map the entire fake channel into the start of the PRAMIN BAR - */ - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, - 0, &priv->pramin_pt); - if (ret) - return ret; - - v = c_offset | 1; - if (dev_priv->vram_sys_base) { - v += dev_priv->vram_sys_base; - v |= 0x30; - } - - i = 0; - while (v < dev_priv->vram_sys_base + c_offset + c_size) { - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v)); - BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v)); - v += 0x1000; - i += 8; - } - - while (i < pt_size) { - BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000); - BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); - i += 8; - } - - BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); - BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); - /* VRAM page table(s), mapped into VM at +1GiB */ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, - NV50_VM_BLOCK/65536*8, 0, 0, - &chan->vm_vram_pt[i]); + ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8, + 0, NVOBJ_FLAG_ZERO_ALLOC, + &chan->vm_vram_pt[i]); if (ret) { - NV_ERROR(dev, "Error creating VRAM page tables: %d\n", - ret); + NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret); dev_priv->vm_vram_pt_nr = i; return ret; } - dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj; + dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]; - for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size; - v += 4) - BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0); - - BAR0_WI32(chan->vm_pd, 0x10 + (i*8), - chan->vm_vram_pt[i]->instance | 0x61); - BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0); + nv_wo32(chan->vm_pd, 0x10 + (i*8), + chan->vm_vram_pt[i]->vinst | 0x61); + nv_wo32(chan->vm_pd, 0x14 + (i*8), 0); } - /* DMA object for PRAMIN BAR */ - ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, - &priv->pramin_bar); - if (ret) - return ret; - BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); - BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); - /* DMA object for FB BAR */ - ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, - &priv->fb_bar); + ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); if (ret) return ret; - BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000); - BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 + - pci_resource_len(dev->pdev, 1) - 1); - BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000); - BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000); - BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000); - BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000); + nv_wo32(priv->fb_bar, 0x00, 0x7fc00000); + nv_wo32(priv->fb_bar, 0x04, 0x40000000 + + pci_resource_len(dev->pdev, 1) - 1); + nv_wo32(priv->fb_bar, 0x08, 0x40000000); + nv_wo32(priv->fb_bar, 0x0c, 0x00000000); + nv_wo32(priv->fb_bar, 0x10, 0x00000000); + nv_wo32(priv->fb_bar, 0x14, 0x00000000); - /* Poke the relevant regs, and pray it works :) */ - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); - nv_wr32(dev, NV50_PUNK_UNK1710, 0); - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | - NV50_PUNK_BAR_CFG_BASE_VALID); - nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | - NV50_PUNK_BAR1_CTXDMA_VALID); - nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | - 
NV50_PUNK_BAR3_CTXDMA_VALID); + dev_priv->engine.instmem.flush(dev); + nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4)); for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); - /* Assume that praying isn't enough, check that we can re-read the - * entire fake channel back from the PRAMIN BAR */ - for (i = 0; i < c_size; i += 4) { - if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) { - NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n", - i); - return -EINVAL; - } - } - - nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700); - - /* Global PRAMIN heap */ - if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) { - NV_ERROR(dev, "Failed to init RAMIN heap\n"); - } - - /*XXX: incorrect, but needed to make hash func "work" */ - dev_priv->ramht_offset = 0x10000; - dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; return 0; } @@ -297,29 +248,24 @@ nv50_instmem_takedown(struct drm_device *dev) if (!priv) return; + dev_priv->ramin_available = false; + /* Restore state from before init */ for (i = 0x1700; i <= 0x1710; i += 4) nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); - nouveau_gpuobj_ref_del(dev, &priv->fb_bar); - nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); - nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); + nouveau_gpuobj_ref(NULL, &priv->fb_bar); + nouveau_gpuobj_ref(NULL, &priv->pramin_bar); + nouveau_gpuobj_ref(NULL, &priv->pramin_pt); /* Destroy dummy channel */ if (chan) { - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); - dev_priv->vm_vram_pt[i] = NULL; - } + for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) + nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); dev_priv->vm_vram_pt_nr = 0; - nouveau_gpuobj_del(dev, &chan->vm_pd); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); - nouveau_gpuobj_ref_del(dev, &chan->ramin); - drm_mm_takedown(&chan->ramin_heap); - - dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; - kfree(chan); + nv50_channel_del(&dev_priv->fifos[0]); + dev_priv->fifos[127] = NULL; } dev_priv->engine.instmem.priv = NULL; @@ -331,14 +277,14 @@ nv50_instmem_suspend(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->fifos[0]; - struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; + struct nouveau_gpuobj *ramin = chan->ramin; int i; - ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size); + ramin->im_backing_suspend = vmalloc(ramin->size); if (!ramin->im_backing_suspend) return -ENOMEM; - for (i = 0; i < ramin->im_pramin->size; i += 4) + for (i = 0; i < ramin->size; i += 4) ramin->im_backing_suspend[i/4] = nv_ri32(dev, i); return 0; } @@ -349,23 +295,25 @@ nv50_instmem_resume(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; struct nouveau_channel *chan = dev_priv->fifos[0]; - struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; + struct nouveau_gpuobj *ramin = chan->ramin; int i; - nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16)); - for (i = 0; i < ramin->im_pramin->size; i += 4) - BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]); + dev_priv->ramin_available = false; + dev_priv->ramin_base = ~0; + for (i = 0; i < ramin->size; i += 4) + nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]); + dev_priv->ramin_available = true; vfree(ramin->im_backing_suspend); ramin->im_backing_suspend = NULL; /* Poke the relevant regs, and pray it works :) */ - nv_wr32(dev, 
NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); nv_wr32(dev, NV50_PUNK_UNK1710, 0); - nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | NV50_PUNK_BAR_CFG_BASE_VALID); - nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) | + nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | NV50_PUNK_BAR1_CTXDMA_VALID); - nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | + nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | NV50_PUNK_BAR3_CTXDMA_VALID); for (i = 0; i < 8; i++) @@ -381,7 +329,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, if (gpuobj->im_backing) return -EINVAL; - *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE); + *sz = ALIGN(*sz, 4096); if (*sz == 0) return -EINVAL; @@ -399,9 +347,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, return ret; } - gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start; - gpuobj->im_backing_start <<= PAGE_SHIFT; - + gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; return 0; } @@ -424,7 +370,7 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj; + struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; uint32_t pte, pte_end; uint64_t vram; @@ -436,11 +382,11 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte = (gpuobj->im_pramin->start >> 12) << 1; pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; - vram = gpuobj->im_backing_start; + vram = gpuobj->vinst; NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", gpuobj->im_pramin->start, pte, pte_end); - NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); + NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); vram |= 1; if (dev_priv->vram_sys_base) { @@ -449,9 +395,10 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) } while (pte < pte_end) { - nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); - nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); - vram += NV50_INSTMEM_PAGE_SIZE; + nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); + nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); + vram += 0x1000; + pte += 2; } dev_priv->engine.instmem.flush(dev); @@ -472,12 +419,17 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) if (gpuobj->im_bound == 0) return -EINVAL; + /* can happen during late takedown */ + if (unlikely(!dev_priv->ramin_available)) + return 0; + pte = (gpuobj->im_pramin->start >> 12) << 1; pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; while (pte < pte_end) { - nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); - nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); + nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); + nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); + pte += 2; } dev_priv->engine.instmem.flush(dev); @@ -489,7 +441,7 @@ void nv50_instmem_flush(struct drm_device *dev) { nv_wr32(dev, 0x00330c, 0x00000001); - if (!nv_wait(0x00330c, 0x00000002, 0x00000000)) + if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); } @@ -497,7 +449,7 @@ void nv84_instmem_flush(struct drm_device *dev) { 
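	/* Assumed semantics, flagged as such: nv_wait(dev, reg, mask, val)
	 * polls until (nv_rd32(dev, reg) & mask) == val or a timeout
	 * expires.  The write-1-then-wait-for-clear sequence below is the
	 * same poll-for-idle idiom used by nv84_instmem_flush() and
	 * nv50_vm_flush() further down, and the extra 'dev' argument is
	 * what this series threads through every nv_wait() caller. */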
nv_wr32(dev, 0x070000, 0x00000001); - if (!nv_wait(0x070000, 0x00000002, 0x00000000)) + if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); } @@ -505,7 +457,7 @@ void nv50_vm_flush(struct drm_device *dev, int engine) { nv_wr32(dev, 0x100c80, (engine << 16) | 1); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) + if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); } diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c new file mode 100644 index 000000000000..7dbb305d7e63 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_pm.c @@ -0,0 +1,131 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_bios.h" +#include "nouveau_pm.h" + +struct nv50_pm_state { + struct nouveau_pm_level *perflvl; + struct pll_lims pll; + enum pll_types type; + int N, M, P; +}; + +int +nv50_pm_clock_get(struct drm_device *dev, u32 id) +{ + struct pll_lims pll; + int P, N, M, ret; + u32 reg0, reg1; + + ret = get_pll_limits(dev, id, &pll); + if (ret) + return ret; + + reg0 = nv_rd32(dev, pll.reg + 0); + reg1 = nv_rd32(dev, pll.reg + 4); + P = (reg0 & 0x00070000) >> 16; + N = (reg1 & 0x0000ff00) >> 8; + M = (reg1 & 0x000000ff); + + return ((pll.refclk * N / M) >> P); +} + +void * +nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, + u32 id, int khz) +{ + struct nv50_pm_state *state; + int dummy, ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + state->type = id; + state->perflvl = perflvl; + + ret = get_pll_limits(dev, id, &state->pll); + if (ret < 0) { + kfree(state); + return (ret == -ENOENT) ? 
NULL : ERR_PTR(ret); + } + + ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M, + &dummy, &dummy, &state->P); + if (ret < 0) { + kfree(state); + return ERR_PTR(ret); + } + + return state; +} + +void +nv50_pm_clock_set(struct drm_device *dev, void *pre_state) +{ + struct nv50_pm_state *state = pre_state; + struct nouveau_pm_level *perflvl = state->perflvl; + u32 reg = state->pll.reg, tmp; + struct bit_entry BIT_M; + u16 script; + int N = state->N; + int M = state->M; + int P = state->P; + + if (state->type == PLL_MEMORY && perflvl->memscript && + bit_table(dev, 'M', &BIT_M) == 0 && + BIT_M.version == 1 && BIT_M.length >= 0x0b) { + script = ROM16(BIT_M.data[0x05]); + if (script) + nouveau_bios_run_init_table(dev, script, NULL); + script = ROM16(BIT_M.data[0x07]); + if (script) + nouveau_bios_run_init_table(dev, script, NULL); + script = ROM16(BIT_M.data[0x09]); + if (script) + nouveau_bios_run_init_table(dev, script, NULL); + + nouveau_bios_run_init_table(dev, perflvl->memscript, NULL); + } + + if (state->type == PLL_MEMORY) { + nv_wr32(dev, 0x100210, 0); + nv_wr32(dev, 0x1002dc, 1); + } + + tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff; + tmp |= 0x80000000 | (P << 16); + nv_wr32(dev, reg + 0, tmp); + nv_wr32(dev, reg + 4, (N << 8) | M); + + if (state->type == PLL_MEMORY) { + nv_wr32(dev, 0x1002dc, 0); + nv_wr32(dev, 0x100210, 0x80000000); + } + + kfree(state); +} + diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index bcd4cf84a7e6..b4a5ecb199f9 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -92,7 +92,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) } /* wait for it to be done */ - if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), + if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) { NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or); NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or, @@ -108,7 +108,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val | NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING); - if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or), + if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or), NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or); NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or, diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c new file mode 100644 index 000000000000..dbbafed36406 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nva3_pm.c @@ -0,0 +1,95 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_bios.h" +#include "nouveau_pm.h" + +/*XXX: boards using limits 0x40 need fixing, the register layout + * is correct here, but, there's some other funny magic + * that modifies things, so it's not likely we'll set/read + * the correct timings yet.. working on it... + */ + +struct nva3_pm_state { + struct pll_lims pll; + int N, M, P; +}; + +int +nva3_pm_clock_get(struct drm_device *dev, u32 id) +{ + struct pll_lims pll; + int P, N, M, ret; + u32 reg; + + ret = get_pll_limits(dev, id, &pll); + if (ret) + return ret; + + reg = nv_rd32(dev, pll.reg + 4); + P = (reg & 0x003f0000) >> 16; + N = (reg & 0x0000ff00) >> 8; + M = (reg & 0x000000ff); + return pll.refclk * N / M / P; +} + +void * +nva3_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl, + u32 id, int khz) +{ + struct nva3_pm_state *state; + int dummy, ret; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return ERR_PTR(-ENOMEM); + + ret = get_pll_limits(dev, id, &state->pll); + if (ret < 0) { + kfree(state); + return (ret == -ENOENT) ? NULL : ERR_PTR(ret); + } + + ret = nv50_calc_pll2(dev, &state->pll, khz, &state->N, &dummy, + &state->M, &state->P); + if (ret < 0) { + kfree(state); + return ERR_PTR(ret); + } + + return state; +} + +void +nva3_pm_clock_set(struct drm_device *dev, void *pre_state) +{ + struct nva3_pm_state *state = pre_state; + u32 reg = state->pll.reg; + + nv_wr32(dev, reg + 4, (state->P << 16) | (state->N << 8) | state->M); + kfree(state); +} + diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c index d64375871979..890c2b95fbc1 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c @@ -42,12 +42,6 @@ nvc0_fifo_reassign(struct drm_device *dev, bool enable) return false; } -bool -nvc0_fifo_cache_flush(struct drm_device *dev) -{ - return true; -} - bool nvc0_fifo_cache_pull(struct drm_device *dev, bool enable) { diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 6b451f864783..13a0f78a9088 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -50,8 +50,7 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, return ret; } - gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start; - gpuobj->im_backing_start <<= PAGE_SHIFT; + gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; return 0; } @@ -84,11 +83,11 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte = gpuobj->im_pramin->start >> 12; pte_end = (gpuobj->im_pramin->size >> 12) + pte; - vram = gpuobj->im_backing_start; + vram = gpuobj->vinst; NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", gpuobj->im_pramin->start, pte, pte_end); - NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); + NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); while (pte < pte_end) { nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1); @@ -134,7 +133,7 @@ void nvc0_instmem_flush(struct drm_device *dev) { nv_wr32(dev, 0x070000, 1); - if (!nv_wait(0x070000, 0x00000002, 0x00000000)) + if (!nv_wait(dev, 0x070000, 0x00000002, 
0x00000000)) NV_ERROR(dev, "PRAMIN flush timeout\n"); } @@ -221,10 +220,6 @@ nvc0_instmem_init(struct drm_device *dev) return -ENOMEM; } - /*XXX: incorrect, but needed to make hash func "work" */ - dev_priv->ramht_offset = 0x10000; - dev_priv->ramht_bits = 9; - dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h index ad64673ace1f..881f8a585613 100644 --- a/drivers/gpu/drm/nouveau/nvreg.h +++ b/drivers/gpu/drm/nouveau/nvreg.h @@ -263,6 +263,7 @@ # define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2 # define NV_CIO_CRE_LCD__INDEX 0x33 # define NV_CIO_CRE_LCD_LCD_SELECT 0:0 +# define NV_CIO_CRE_LCD_ROUTE_MASK 0x3b # define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36 # define NV_CIO_CRE_DDC0_WR__INDEX 0x37 # define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */ diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index d42c76c23714..18c3c71e41b1 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -56,8 +56,6 @@ static struct drm_driver driver = { .irq_uninstall = r128_driver_irq_uninstall, .irq_handler = r128_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = r128_ioctls, .dma_ioctl = r128_cce_buffers, .fops = { diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index aebe00875041..6cae4f2028d2 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -65,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ - evergreen.o evergreen_cs.o + evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index cd0290f946cf..df2b6f2b35f8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -398,65 +398,76 @@ static void atombios_disable_ss(struct drm_crtc *crtc) union atom_enable_ss { - ENABLE_LVDS_SS_PARAMETERS legacy; + ENABLE_LVDS_SS_PARAMETERS lvds_ss; + ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; + ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; }; -static void atombios_enable_ss(struct drm_crtc *crtc) +static void atombios_crtc_program_ss(struct drm_crtc *crtc, + int enable, + int pll_id, + struct radeon_atom_ss *ss) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - struct drm_encoder *encoder = NULL; - struct radeon_encoder *radeon_encoder = NULL; - struct radeon_encoder_atom_dig *dig = NULL; int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); union atom_enable_ss args; - uint16_t percentage = 0; - uint8_t type = 0, step = 0, delay = 0, range = 0; - - /* XXX add ss support for DCE4 */ - if (ASIC_IS_DCE4(rdev)) - return; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - radeon_encoder = to_radeon_encoder(encoder); - /* only enable spread spectrum on LVDS */ - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - dig = 
radeon_encoder->enc_priv; - if (dig && dig->ss) { - percentage = dig->ss->percentage; - type = dig->ss->type; - step = dig->ss->step; - delay = dig->ss->delay; - range = dig->ss->range; - } else - return; - } else - return; - break; - } - } - - if (!radeon_encoder) - return; memset(&args, 0, sizeof(args)); - if (ASIC_IS_AVIVO(rdev)) { - args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage); - args.v1.ucSpreadSpectrumType = type; - args.v1.ucSpreadSpectrumStep = step; - args.v1.ucSpreadSpectrumDelay = delay; - args.v1.ucSpreadSpectrumRange = range; - args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; - args.v1.ucEnable = ATOM_ENABLE; + + if (ASIC_IS_DCE4(rdev)) { + args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); + args.v2.ucSpreadSpectrumType = ss->type; + switch (pll_id) { + case ATOM_PPLL1: + args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; + args.v2.usSpreadSpectrumAmount = ss->amount; + args.v2.usSpreadSpectrumStep = ss->step; + break; + case ATOM_PPLL2: + args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; + args.v2.usSpreadSpectrumAmount = ss->amount; + args.v2.usSpreadSpectrumStep = ss->step; + break; + case ATOM_DCPLL: + args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; + args.v2.usSpreadSpectrumAmount = 0; + args.v2.usSpreadSpectrumStep = 0; + break; + case ATOM_PPLL_INVALID: + return; + } + args.v2.ucEnable = enable; + } else if (ASIC_IS_DCE3(rdev)) { + args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); + args.v1.ucSpreadSpectrumType = ss->type; + args.v1.ucSpreadSpectrumStep = ss->step; + args.v1.ucSpreadSpectrumDelay = ss->delay; + args.v1.ucSpreadSpectrumRange = ss->range; + args.v1.ucPpll = pll_id; + args.v1.ucEnable = enable; + } else if (ASIC_IS_AVIVO(rdev)) { + if (enable == ATOM_DISABLE) { + atombios_disable_ss(crtc); + return; + } + args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); + args.lvds_ss_2.ucSpreadSpectrumType = ss->type; + args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; + args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; + args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; + args.lvds_ss_2.ucEnable = enable; } else { - args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); - args.legacy.ucSpreadSpectrumType = type; - args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; - args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; - args.legacy.ucEnable = ATOM_ENABLE; + if (enable == ATOM_DISABLE) { + atombios_disable_ss(crtc); + return; + } + args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); + args.lvds_ss.ucSpreadSpectrumType = ss->type; + args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; + args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; + args.lvds_ss.ucEnable = enable; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } @@ -468,7 +479,9 @@ union adjust_pixel_clock { static u32 atombios_adjust_pll(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct radeon_pll *pll) + struct radeon_pll *pll, + bool ss_enabled, + struct radeon_atom_ss *ss) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; @@ -482,19 +495,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, /* reset the pll flags */ pll->flags = 0; - /* select the PLL algo */ - if (ASIC_IS_AVIVO(rdev)) { - if (radeon_new_pll == 0) - pll->algo = PLL_ALGO_LEGACY; - else - pll->algo = PLL_ALGO_NEW; - } else { - if (radeon_new_pll == 1) - pll->algo = 
PLL_ALGO_NEW; - else - pll->algo = PLL_ALGO_LEGACY; - } - if (ASIC_IS_AVIVO(rdev)) { if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || @@ -531,29 +531,22 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, } } + /* use recommended ref_div for ss */ + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + if (ss_enabled) { + if (ss->refdiv) { + pll->flags |= RADEON_PLL_USE_REF_DIV; + pll->reference_div = ss->refdiv; + } + } + } + if (ASIC_IS_AVIVO(rdev)) { /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) adjusted_clock = mode->clock * 2; - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { - pll->algo = PLL_ALGO_LEGACY; + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; - } - /* There is some evidence (often anecdotal) that RV515/RV620 LVDS - * (on some boards at least) prefers the legacy algo. I'm not - * sure whether this should handled generically or on a - * case-by-case quirk basis. Both algos should work fine in the - * majority of cases. - */ - if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && - ((rdev->family == CHIP_RV515) || - (rdev->family == CHIP_RV620))) { - /* allow the user to overrride just in case */ - if (radeon_new_pll == 1) - pll->algo = PLL_ALGO_NEW; - else - pll->algo = PLL_ALGO_LEGACY; - } } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; @@ -589,9 +582,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v1.ucTransmitterID = radeon_encoder->encoder_id; args.v1.ucEncodeMode = encoder_mode; if (encoder_mode == ATOM_ENCODER_MODE_DP) { - /* may want to enable SS on DP eventually */ - /* args.v1.ucConfig |= - ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/ + if (ss_enabled) + args.v1.ucConfig |= + ADJUST_DISPLAY_CONFIG_SS_ENABLE; } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { args.v1.ucConfig |= ADJUST_DISPLAY_CONFIG_SS_ENABLE; @@ -608,11 +601,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v3.sInput.ucDispPllConfig = 0; if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - if (encoder_mode == ATOM_ENCODER_MODE_DP) { - /* may want to enable SS on DP/eDP eventually */ - /*args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE;*/ + if (ss_enabled) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE; args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; /* 16200 or 27000 */ @@ -632,17 +624,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, } } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { if (encoder_mode == ATOM_ENCODER_MODE_DP) { - /* may want to enable SS on DP/eDP eventually */ - /*args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE;*/ + if (ss_enabled) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE; args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; /* 16200 or 27000 */ args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) { - /* want to enable SS on LVDS eventually */ - /*args.v3.sInput.ucDispPllConfig |= - DISPPLL_CONFIG_SS_ENABLE;*/ + if (ss_enabled) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE; } else { if (mode->clock > 165000) args.v3.sInput.ucDispPllConfig |= @@ -816,6 +808,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct 
drm_display_mode struct radeon_pll *pll; u32 adjusted_clock; int encoder_mode = 0; + struct radeon_atom_ss ss; + bool ss_enabled = false; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { @@ -842,25 +836,123 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode break; } + if (radeon_encoder->active_device & + (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector = + radeon_get_connector_for_encoder(encoder); + struct radeon_connector *radeon_connector = + to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + int dp_clock; + + switch (encoder_mode) { + case ATOM_ENCODER_MODE_DP: + /* DP/eDP */ + dp_clock = dig_connector->dp_clock / 10; + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { + if (ASIC_IS_DCE4(rdev)) + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, + dig->lcd_ss_id, + dp_clock); + else + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, + dig->lcd_ss_id); + } else { + if (ASIC_IS_DCE4(rdev)) + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_SS_ON_DP, + dp_clock); + else { + if (dp_clock == 16200) { + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, + ATOM_DP_SS_ID2); + if (!ss_enabled) + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, + ATOM_DP_SS_ID1); + } else + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, + ATOM_DP_SS_ID1); + } + } + break; + case ATOM_ENCODER_MODE_LVDS: + if (ASIC_IS_DCE4(rdev)) + ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, + dig->lcd_ss_id, + mode->clock / 10); + else + ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, + dig->lcd_ss_id); + break; + case ATOM_ENCODER_MODE_DVI: + if (ASIC_IS_DCE4(rdev)) + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_SS_ON_TMDS, + mode->clock / 10); + break; + case ATOM_ENCODER_MODE_HDMI: + if (ASIC_IS_DCE4(rdev)) + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_SS_ON_HDMI, + mode->clock / 10); + break; + default: + break; + } + } + /* adjust pixel clock as needed */ - adjusted_clock = atombios_adjust_pll(crtc, mode, pll); + adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); + atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, encoder_mode, radeon_encoder->encoder_id, mode->clock, ref_div, fb_div, frac_fb_div, post_div); + if (ss_enabled) { + /* calculate ss amount and step size */ + if (ASIC_IS_DCE4(rdev)) { + u32 step_size; + u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; + ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; + ss.amount |= ((amount - (ss.amount * 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & + ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; + if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) + step_size = (4 * amount * ref_div * (ss.rate * 2048)) / + (125 * 25 * pll->reference_freq / 100); + else + step_size = (2 * amount * ref_div * (ss.rate * 2048)) / + (125 * 25 * pll->reference_freq / 100); + ss.step = step_size; + } + + atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss); + } } -static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, 
int y, - struct drm_framebuffer *old_fb) +static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_framebuffer *radeon_fb; + struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; uint64_t fb_location; @@ -868,28 +960,43 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, int r; /* no fb bound */ - if (!crtc->fb) { + if (!atomic && !crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } - radeon_fb = to_radeon_framebuffer(crtc->fb); + if (atomic) { + radeon_fb = to_radeon_framebuffer(fb); + target_fb = fb; + } + else { + radeon_fb = to_radeon_framebuffer(crtc->fb); + target_fb = crtc->fb; + } - /* Pin framebuffer & get tilling informations */ + /* If atomic, assume fb object is pinned & idle & fenced and + * just update base pointers + */ obj = radeon_fb->obj; rbo = obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; - r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); - if (unlikely(r != 0)) { - radeon_bo_unreserve(rbo); - return -EINVAL; + + if (atomic) + fb_location = radeon_bo_gpu_offset(rbo); + else { + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); + return -EINVAL; + } } + radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); - switch (crtc->fb->bits_per_pixel) { + switch (target_fb->bits_per_pixel) { case 8: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); @@ -909,7 +1016,7 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, break; default: DRM_ERROR("Unsupported screen depth %d\n", - crtc->fb->bits_per_pixel); + target_fb->bits_per_pixel); return -EINVAL; } @@ -955,10 +1062,10 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); - WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width); - WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height); + WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); + WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); @@ -977,8 +1084,8 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, else WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); - if (old_fb && old_fb != crtc->fb) { - radeon_fb = to_radeon_framebuffer(old_fb); + if (!atomic && fb && fb != crtc->fb) { + radeon_fb = to_radeon_framebuffer(fb); rbo = radeon_fb->obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) @@ -993,8 +1100,9 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, return 0; } -static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +static int avivo_crtc_do_set_base(struct drm_crtc 
*crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -1002,33 +1110,48 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct radeon_framebuffer *radeon_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; + struct drm_framebuffer *target_fb; uint64_t fb_location; uint32_t fb_format, fb_pitch_pixels, tiling_flags; int r; /* no fb bound */ - if (!crtc->fb) { + if (!atomic && !crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } - radeon_fb = to_radeon_framebuffer(crtc->fb); + if (atomic) { + radeon_fb = to_radeon_framebuffer(fb); + target_fb = fb; + } + else { + radeon_fb = to_radeon_framebuffer(crtc->fb); + target_fb = crtc->fb; + } - /* Pin framebuffer & get tilling informations */ obj = radeon_fb->obj; rbo = obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; - r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); - if (unlikely(r != 0)) { - radeon_bo_unreserve(rbo); - return -EINVAL; + + /* If atomic, assume fb object is pinned & idle & fenced and + * just update base pointers + */ + if (atomic) + fb_location = radeon_bo_gpu_offset(rbo); + else { + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); + return -EINVAL; + } } radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); - switch (crtc->fb->bits_per_pixel) { + switch (target_fb->bits_per_pixel) { case 8: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | @@ -1052,7 +1175,7 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, break; default: DRM_ERROR("Unsupported screen depth %d\n", - crtc->fb->bits_per_pixel); + target_fb->bits_per_pixel); return -EINVAL; } @@ -1093,10 +1216,10 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0); - WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width); - WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height); + WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); + WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); @@ -1115,8 +1238,8 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, else WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); - if (old_fb && old_fb != crtc->fb) { - radeon_fb = to_radeon_framebuffer(old_fb); + if (!atomic && fb && fb != crtc->fb) { + radeon_fb = to_radeon_framebuffer(fb); rbo = radeon_fb->obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) @@ -1138,11 +1261,26 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE4(rdev)) - return evergreen_crtc_set_base(crtc, x, y, old_fb); + return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0); else if (ASIC_IS_AVIVO(rdev)) - return avivo_crtc_set_base(crtc, x, y, old_fb); + return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); else - return radeon_crtc_set_base(crtc, x, y, 
old_fb); + return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + + if (ASIC_IS_DCE4(rdev)) + return evergreen_crtc_do_set_base(crtc, fb, x, y, 1); + else if (ASIC_IS_AVIVO(rdev)) + return avivo_crtc_do_set_base(crtc, fb, x, y, 1); + else + return radeon_crtc_do_set_base(crtc, fb, x, y, 1); } /* properly set additional regs when using atombios */ @@ -1230,12 +1368,19 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, } } - atombios_disable_ss(crtc); /* always set DCPLL */ - if (ASIC_IS_DCE4(rdev)) + if (ASIC_IS_DCE4(rdev)) { + struct radeon_atom_ss ss; + bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_SS_ON_DCPLL, + rdev->clock.default_dispclk); + if (ss_enabled) + atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); atombios_crtc_set_dcpll(crtc); + if (ss_enabled) + atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); + } atombios_crtc_set_pll(crtc, adjusted_mode); - atombios_enable_ss(crtc); if (ASIC_IS_DCE4(rdev)) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); @@ -1311,6 +1456,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { .mode_fixup = atombios_crtc_mode_fixup, .mode_set = atombios_crtc_mode_set, .mode_set_base = atombios_crtc_set_base, + .mode_set_base_atomic = atombios_crtc_set_base_atomic, .prepare = atombios_crtc_prepare, .commit = atombios_crtc_commit, .load_lut = radeon_crtc_load_lut, diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 2f93d46ae69a..f12a5b3ec050 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -32,6 +32,7 @@ #include "atom.h" #include "avivod.h" #include "evergreen_reg.h" +#include "evergreen_blit_shaders.h" #define EVERGREEN_PFP_UCODE_SIZE 1120 #define EVERGREEN_PM4_UCODE_SIZE 1376 @@ -284,9 +285,444 @@ void evergreen_hpd_fini(struct radeon_device *rdev) } } +/* watermark setup */ + +static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, + struct radeon_crtc *radeon_crtc, + struct drm_display_mode *mode, + struct drm_display_mode *other_mode) +{ + u32 tmp = 0; + /* + * Line Buffer Setup + * There are 3 line buffers, each one shared by 2 display controllers. + * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between + * the display controllers. 
The partitioning is done via one of four + * preset allocations specified in bits 2:0: + * first display controller + * 0 - first half of lb (3840 * 2) + * 1 - first 3/4 of lb (5760 * 2) + * 2 - whole lb (7680 * 2) + * 3 - first 1/4 of lb (1920 * 2) + * second display controller + * 4 - second half of lb (3840 * 2) + * 5 - second 3/4 of lb (5760 * 2) + * 6 - whole lb (7680 * 2) + * 7 - last 1/4 of lb (1920 * 2) + */ + if (mode && other_mode) { + if (mode->hdisplay > other_mode->hdisplay) { + if (mode->hdisplay > 2560) + tmp = 1; /* 3/4 */ + else + tmp = 0; /* 1/2 */ + } else if (other_mode->hdisplay > mode->hdisplay) { + if (other_mode->hdisplay > 2560) + tmp = 3; /* 1/4 */ + else + tmp = 0; /* 1/2 */ + } else + tmp = 0; /* 1/2 */ + } else if (mode) + tmp = 2; /* whole */ + else if (other_mode) + tmp = 3; /* 1/4 */ + + /* second controller of the pair uses second half of the lb */ + if (radeon_crtc->crtc_id % 2) + tmp += 4; + WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); + + switch (tmp) { + case 0: + case 4: + default: + return 3840 * 2; + case 1: + case 5: + return 5760 * 2; + case 2: + case 6: + return 7680 * 2; + case 3: + case 7: + return 1920 * 2; + } +} + +static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev) +{ + u32 tmp = RREG32(MC_SHARED_CHMAP); + + switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { + case 0: + default: + return 1; + case 1: + return 2; + case 2: + return 4; + case 3: + return 8; + } +} + +struct evergreen_wm_params { + u32 dram_channels; /* number of dram channels */ + u32 yclk; /* bandwidth per dram data pin in kHz */ + u32 sclk; /* engine clock in kHz */ + u32 disp_clk; /* display clock in kHz */ + u32 src_width; /* viewport width */ + u32 active_time; /* active display time in ns */ + u32 blank_time; /* blank time in ns */ + bool interlaced; /* mode is interlaced */ + fixed20_12 vsc; /* vertical scale ratio */ + u32 num_heads; /* number of active crtcs */ + u32 bytes_per_pixel; /* bytes per pixel display + overlay */ + u32 lb_size; /* line buffer allocated to pipe */ + u32 vtaps; /* vertical scaler taps */ +}; + +static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate DRAM Bandwidth and the part allocated to display. */ + fixed20_12 dram_efficiency; /* 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + dram_efficiency.full = dfixed_const(7); + dram_efficiency.full = dfixed_div(dram_efficiency, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm) +{ + /* Calculate DRAM Bandwidth and the part allocated to display. */
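(Aside: a worked example of the selection above. The sketch mirrors evergreen_line_buffer_adjust() as a pure function, with "no mode" represented by a zero width; the sample widths are arbitrary.)

	#include <stdio.h>

	/* Mirror of the DC_LB_MEMORY_SPLIT encoding selection above. */
	static unsigned lb_entries(int hdisp, int other_hdisp, int crtc_id)
	{
		unsigned tmp = 0;

		if (hdisp && other_hdisp) {
			if (hdisp > other_hdisp)
				tmp = (hdisp > 2560) ? 1 : 0;		/* 3/4 : 1/2 */
			else if (other_hdisp > hdisp)
				tmp = (other_hdisp > 2560) ? 3 : 0;	/* 1/4 : 1/2 */
		} else if (hdisp)
			tmp = 2;					/* whole lb */
		else if (other_hdisp)
			tmp = 3;

		if (crtc_id % 2)
			tmp += 4;	/* second controller of the pair */

		switch (tmp & 3) {
		case 1: return 5760 * 2;
		case 2: return 7680 * 2;
		case 3: return 1920 * 2;
		default: return 3840 * 2;
		}
	}

	int main(void)
	{
		/* a lone 1920-wide head gets the whole line buffer */
		printf("%u\n", lb_entries(1920, 0, 0));			/* 15360 */
		/* a 2560/1920 pair: neither exceeds 2560, so each side gets half */
		printf("%u %u\n", lb_entries(2560, 1920, 0),
		       lb_entries(1920, 2560, 1));			/* 7680 7680 */
		return 0;
	}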
+ fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */ + disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate the display Data return Bandwidth */ + fixed20_12 return_efficiency; /* 0.8 */ + fixed20_12 sclk, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + sclk.full = dfixed_const(wm->sclk); + sclk.full = dfixed_div(sclk, a); + a.full = dfixed_const(10); + return_efficiency.full = dfixed_const(8); + return_efficiency.full = dfixed_div(return_efficiency, a); + a.full = dfixed_const(32); + bandwidth.full = dfixed_mul(a, sclk); + bandwidth.full = dfixed_mul(bandwidth, return_efficiency); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate the DMIF Request Bandwidth */ + fixed20_12 disp_clk_request_efficiency; /* 0.8 */ + fixed20_12 disp_clk, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + disp_clk.full = dfixed_const(wm->disp_clk); + disp_clk.full = dfixed_div(disp_clk, a); + a.full = dfixed_const(10); + disp_clk_request_efficiency.full = dfixed_const(8); + disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); + a.full = dfixed_const(32); + bandwidth.full = dfixed_mul(a, disp_clk); + bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */ + u32 dram_bandwidth = evergreen_dram_bandwidth(wm); + u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm); + u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm); + + return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); +} + +static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm) +{ + /* Calculate the display mode Average Bandwidth + * DisplayMode should contain the source and destination dimensions, + * timing, etc. + */ + fixed20_12 bpp; + fixed20_12 line_time; + fixed20_12 src_width; + fixed20_12 bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + line_time.full = dfixed_const(wm->active_time + wm->blank_time); + line_time.full = dfixed_div(line_time, a); + bpp.full = dfixed_const(wm->bytes_per_pixel); + src_width.full = dfixed_const(wm->src_width); + bandwidth.full = dfixed_mul(src_width, bpp); + bandwidth.full = dfixed_mul(bandwidth, wm->vsc); + bandwidth.full = dfixed_div(bandwidth, line_time); + + return dfixed_trunc(bandwidth); +} + +static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm) +{ + /* First calculate the latency in ns */ + u32 mc_latency = 2000; /* 2000 ns. */
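(Aside: the latency terms computed just below are plain integer arithmetic before the fixed-point section; with made-up sample numbers, units as in the driver, they work out like this:)

	#include <stdio.h>

	int main(void)
	{
		/* made-up sample inputs */
		unsigned available_bw = 10000;
		unsigned disp_clk = 300000;	/* kHz */
		unsigned num_heads = 2;

		unsigned mc_latency = 2000;				/* ns */
		unsigned worst_chunk = (512 * 8 * 1000) / available_bw;	/* 409 */
		unsigned cursor_pair = (128 * 4 * 1000) / available_bw;	/* 51 */
		unsigned dc_latency = 40000000 / disp_clk;		/* 133 */
		unsigned other_heads = (num_heads + 1) * worst_chunk +
				       num_heads * cursor_pair;		/* 1329 */

		/* base latency before the line-fill-time correction */
		printf("%u ns\n", mc_latency + other_heads + dc_latency); /* 3462 */
		return 0;
	}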
+ u32 available_bandwidth = evergreen_available_bandwidth(wm); + u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; + u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; + u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ + u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + + (wm->num_heads * cursor_line_pair_return_time); + u32 latency = mc_latency + other_heads_data_return_time + dc_latency; + u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; + fixed20_12 a, b, c; + + if (wm->num_heads == 0) + return 0; + + a.full = dfixed_const(2); + b.full = dfixed_const(1); + if ((wm->vsc.full > a.full) || + ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || + (wm->vtaps >= 5) || + ((wm->vsc.full >= a.full) && wm->interlaced)) + max_src_lines_per_dst_line = 4; + else + max_src_lines_per_dst_line = 2; + + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); + + b.full = dfixed_const(1000); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(c, b); + c.full = dfixed_const(wm->bytes_per_pixel); + b.full = dfixed_mul(b, c); + + lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b)); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); + c.full = dfixed_const(lb_fill_bw); + b.full = dfixed_div(c, b); + a.full = dfixed_div(a, b); + line_fill_time = dfixed_trunc(a); + + if (line_fill_time < wm->active_time) + return latency; + else + return latency + (line_fill_time - wm->active_time); + +} + +static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm) +{ + if (evergreen_average_bandwidth(wm) <= + (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads)) + return true; + else + return false; +} + +static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm) +{ + if (evergreen_average_bandwidth(wm) <= + (evergreen_available_bandwidth(wm) / wm->num_heads)) + return true; + else + return false; +} + +static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm) +{ + u32 lb_partitions = wm->lb_size / wm->src_width; + u32 line_time = wm->active_time + wm->blank_time; + u32 latency_tolerant_lines; + u32 latency_hiding; + fixed20_12 a; + + a.full = dfixed_const(1); + if (wm->vsc.full > a.full) + latency_tolerant_lines = 1; + else { + if (lb_partitions <= (wm->vtaps + 1)) + latency_tolerant_lines = 1; + else + latency_tolerant_lines = 2; + } + + latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); + + if (evergreen_latency_watermark(wm) <= latency_hiding) + return true; + else + return false; +} + +static void evergreen_program_watermarks(struct radeon_device *rdev, + struct radeon_crtc *radeon_crtc, + u32 lb_size, u32 num_heads) +{ + struct drm_display_mode *mode = &radeon_crtc->base.mode; + struct evergreen_wm_params wm; + u32 pixel_period; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 priority_a_mark = 0, priority_b_mark = 0; + u32 priority_a_cnt = PRIORITY_OFF; + u32 priority_b_cnt = PRIORITY_OFF; + u32 pipe_offset = radeon_crtc->crtc_id * 16; + u32 tmp, arb_control3; + fixed20_12 a, b, c; + + if (radeon_crtc->base.enabled && num_heads && mode) { + pixel_period = 1000000 / (u32)mode->clock; + line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + priority_a_cnt = 0; + priority_b_cnt = 0; + + wm.yclk =
rdev->pm.current_mclk * 10; + wm.sclk = rdev->pm.current_sclk * 10; + wm.disp_clk = mode->clock; + wm.src_width = mode->crtc_hdisplay; + wm.active_time = mode->crtc_hdisplay * pixel_period; + wm.blank_time = line_time - wm.active_time; + wm.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm.interlaced = true; + wm.vsc = radeon_crtc->vsc; + wm.vtaps = 1; + if (radeon_crtc->rmx_type != RMX_OFF) + wm.vtaps = 2; + wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm.lb_size = lb_size; + wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); + wm.num_heads = num_heads; + + /* set for high clocks */ + latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535); + /* set for low clocks */ + /* wm.yclk = low clk; wm.sclk = low clk */ + latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... */ + if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || + !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || + !evergreen_check_latency_hiding(&wm) || + (rdev->disp_priority == 2)) { + DRM_INFO("force priority to high\n"); + priority_a_cnt |= PRIORITY_ALWAYS_ON; + priority_b_cnt |= PRIORITY_ALWAYS_ON; + } + + a.full = dfixed_const(1000); + b.full = dfixed_const(mode->clock); + b.full = dfixed_div(b, a); + c.full = dfixed_const(latency_watermark_a); + c.full = dfixed_mul(c, b); + c.full = dfixed_mul(c, radeon_crtc->hsc); + c.full = dfixed_div(c, a); + a.full = dfixed_const(16); + c.full = dfixed_div(c, a); + priority_a_mark = dfixed_trunc(c); + priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK; + + a.full = dfixed_const(1000); + b.full = dfixed_const(mode->clock); + b.full = dfixed_div(b, a); + c.full = dfixed_const(latency_watermark_b); + c.full = dfixed_mul(c, b); + c.full = dfixed_mul(c, radeon_crtc->hsc); + c.full = dfixed_div(c, a); + a.full = dfixed_const(16); + c.full = dfixed_div(c, a); + priority_b_mark = dfixed_trunc(c); + priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + } + + /* select wm A */ + arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset); + tmp = arb_control3; + tmp &= ~LATENCY_WATERMARK_MASK(3); + tmp |= LATENCY_WATERMARK_MASK(1); + WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp); + WREG32(PIPE0_LATENCY_CONTROL + pipe_offset, + (LATENCY_LOW_WATERMARK(latency_watermark_a) | + LATENCY_HIGH_WATERMARK(line_time))); + /* select wm B */ + tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset); + tmp &= ~LATENCY_WATERMARK_MASK(3); + tmp |= LATENCY_WATERMARK_MASK(2); + WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp); + WREG32(PIPE0_LATENCY_CONTROL + pipe_offset, + (LATENCY_LOW_WATERMARK(latency_watermark_b) | + LATENCY_HIGH_WATERMARK(line_time))); + /* restore original selection */ + WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3); + + /* write the priority marks */ + WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); + WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); + +} + void evergreen_bandwidth_update(struct radeon_device *rdev) { - /* XXX */ + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + u32 num_heads = 0, lb_size; + int i; + + radeon_update_display_priority(rdev); + + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i]->base.enabled) + num_heads++; + } + for (i = 0; i < rdev->num_crtc; i += 2) { + mode0 = &rdev->mode_info.crtcs[i]->base.mode; + mode1 = 
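(Aside: the priority mark fixed-point chain above amounts to "pixels fetched during the latency window, in the 16-pixel units of PRIORITY_*_CNT". A back-of-the-envelope version with sample numbers, hsc assumed 1.0:)

	#include <stdio.h>

	int main(void)
	{
		/* sample inputs: 3462 ns watermark, 148.5 MHz pixel clock */
		unsigned long long latency_ns = 3462;
		unsigned long long clock_khz = 148500;

		/* pixels fetched while covering the latency, in 16-pixel units */
		unsigned long long pixels = latency_ns * (clock_khz / 1000) / 1000;

		printf("priority mark: %llu\n", pixels / 16);	/* 32 */
		return 0;
	}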
&rdev->mode_info.crtcs[i+1]->base.mode; + lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1); + evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads); + lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0); + evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads); + } } static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) @@ -677,7 +1113,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) static int evergreen_cp_start(struct radeon_device *rdev) { - int r; + int r, i; uint32_t cp_me; r = radeon_ring_lock(rdev, 7); @@ -697,16 +1133,39 @@ static int evergreen_cp_start(struct radeon_device *rdev) cp_me = 0xff; WREG32(CP_ME_CNTL, cp_me); - r = radeon_ring_lock(rdev, 4); + r = radeon_ring_lock(rdev, evergreen_default_size + 15); if (r) { DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); return r; } - /* init some VGT regs */ - radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); - radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2); - radeon_ring_write(rdev, 0xe); - radeon_ring_write(rdev, 0x10); + + /* setup clear context state */ + radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + for (i = 0; i < evergreen_default_size; i++) + radeon_ring_write(rdev, evergreen_default_state[i]); + + radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE); + + /* set clear context state */ + radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); + radeon_ring_write(rdev, 0); + + /* SQ_VTX_BASE_VTX_LOC */ + radeon_ring_write(rdev, 0xc0026f00); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + + /* Clear consts */ + radeon_ring_write(rdev, 0xc0036f00); + radeon_ring_write(rdev, 0x00000bc4); + radeon_ring_write(rdev, 0xffffffff); + radeon_ring_write(rdev, 0xffffffff); + radeon_ring_write(rdev, 0xffffffff); + radeon_ring_unlock_commit(rdev); return 0; @@ -731,7 +1190,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) /* Set ring buffer size */ rb_bufsz = drm_order(rdev->cp.ring_size / 8); - tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -745,8 +1204,19 @@ int evergreen_cp_resume(struct radeon_device *rdev) WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); WREG32(CP_RB_RPTR_WR, 0); WREG32(CP_RB_WPTR, 0); - WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); - WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); + + /* set the wb address whether it's enabled or not */ + WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); + WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); + WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); + + if (rdev->wb.enabled) + WREG32(SCRATCH_UMSK, 0xff); + else { + tmp |= RB_NO_UPDATE; + WREG32(SCRATCH_UMSK, 0); + } + mdelay(1); WREG32(CP_RB_CNTL, tmp); @@ -1584,6 +2054,7 @@ int evergreen_irq_set(struct radeon_device *rdev) if (rdev->irq.sw_int) { DRM_DEBUG("evergreen_irq_set: sw int\n"); cp_int_cntl |= RB_INT_ENABLE; + cp_int_cntl |= TIME_STAMP_INT_ENABLE; } if (rdev->irq.crtc_vblank_int[0]) {
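(Aside: the raw dwords pushed in evergreen_cp_start() above, 0xc0026f00 and 0xc0036f00, and the tables in evergreen_blit_shaders.c further down are pre-encoded type-3 command packets. Their header layout follows the PACKET3() macro used elsewhere in the patch, so a tiny userspace decoder makes them readable:)

	#include <stdio.h>
	#include <stdint.h>

	/* Type-3 header as built by the PACKET3(op, n) macro:
	 * (3 << 30) | (n << 16) | (op << 8), with n + 1 body dwords following.
	 */
	static void decode_pkt3(uint32_t hdr)
	{
		printf("type %u, opcode 0x%02x, %u body dwords\n",
		       hdr >> 30, (hdr >> 8) & 0xff, ((hdr >> 16) & 0x3fff) + 1);
	}

	int main(void)
	{
		decode_pkt3(0xc0026f00);	/* SQ_VTX_BASE_VTX_LOC write: 3 dwords */
		decode_pkt3(0xc0036f00);	/* const-clear write: 4 dwords */
		decode_pkt3(0xc0016900);	/* SET_CONTEXT_REG entries in the state table */
		return 0;
	}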
DRM_DEBUG("evergreen_irq_set: vblank 0\n"); @@ -1760,8 +2231,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) { u32 wptr, tmp; - /* XXX use writeback */ - wptr = RREG32(IH_RB_WPTR); + if (rdev->wb.enabled) + wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; + else + wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { /* When a ring buffer overflow happen start parsing interrupt @@ -2000,6 +2473,7 @@ restart_ih: break; case 181: /* CP EOP event */ DRM_DEBUG("IH: CP EOP\n"); + radeon_fence_process(rdev); break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: CP EOP\n"); @@ -2048,26 +2522,18 @@ static int evergreen_startup(struct radeon_device *rdev) return r; } evergreen_gpu_init(rdev); -#if 0 - if (!rdev->r600_blit.shader_obj) { - r = r600_blit_init(rdev); - if (r) { - DRM_ERROR("radeon: failed blitter (%d).\n", r); - return r; - } + + r = evergreen_blit_init(rdev); + if (r) { + evergreen_blit_fini(rdev); + rdev->asic->copy = NULL; + dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - DRM_ERROR("failed to pin blit object %d\n", r); - return r; - } -#endif /* Enable IRQ */ r = r600_irq_init(rdev); @@ -2087,8 +2553,6 @@ static int evergreen_startup(struct radeon_device *rdev) r = evergreen_cp_resume(rdev); if (r) return r; - /* write back buffer are not vital so don't worry about failure */ - r600_wb_enable(rdev); return 0; } @@ -2122,23 +2586,43 @@ int evergreen_resume(struct radeon_device *rdev) int evergreen_suspend(struct radeon_device *rdev) { -#if 0 int r; -#endif + /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); rdev->cp.ready = false; evergreen_irq_suspend(rdev); - r600_wb_disable(rdev); + radeon_wb_disable(rdev); evergreen_pcie_gart_disable(rdev); -#if 0 + /* unpin shaders bo */ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (likely(r == 0)) { radeon_bo_unpin(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); } -#endif + + return 0; +} + +int evergreen_copy_blit(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_pages, struct radeon_fence *fence) +{ + int r; + + mutex_lock(&rdev->r600_blit.mutex); + rdev->r600_blit.vb_ib = NULL; + r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); + if (r) { + if (rdev->r600_blit.vb_ib) + radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); + mutex_unlock(&rdev->r600_blit.mutex); + return r; + } + evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); + evergreen_blit_done_copy(rdev, fence); + mutex_unlock(&rdev->r600_blit.mutex); return 0; } @@ -2246,8 +2730,8 @@ int evergreen_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r700_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -2269,10 +2753,10 @@ int evergreen_init(struct radeon_device *rdev) void evergreen_fini(struct radeon_device *rdev) { - /*r600_blit_fini(rdev);*/ + evergreen_blit_fini(rdev); r700_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); 
evergreen_pcie_gart_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c new file mode 100644 index 000000000000..086b9b0416c4 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -0,0 +1,772 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Alex Deucher + */ + +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon.h" + +#include "evergreend.h" +#include "evergreen_blit_shaders.h" + +#define DI_PT_RECTLIST 0x11 +#define DI_INDEX_SIZE_16_BIT 0x0 +#define DI_SRC_SEL_AUTO_INDEX 0x2 + +#define FMT_8 0x1 +#define FMT_5_6_5 0x8 +#define FMT_8_8_8_8 0x1a +#define COLOR_8 0x1 +#define COLOR_5_6_5 0x8 +#define COLOR_8_8_8_8 0x1a + +/* emits 17 */ +static void +set_render_target(struct radeon_device *rdev, int format, + int w, int h, u64 gpu_addr) +{ + u32 cb_color_info; + int pitch, slice; + + h = ALIGN(h, 8); + if (h < 8) + h = 8; + + cb_color_info = ((format << 2) | (1 << 24)); + pitch = (w / 8) - 1; + slice = ((w * h) / 64) - 1; + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15)); + radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, pitch); + radeon_ring_write(rdev, slice); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, cb_color_info); + radeon_ring_write(rdev, (1 << 4)); + radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16)); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); +} + +/* emits 5dw */ +static void +cp_set_surface_sync(struct radeon_device *rdev, + u32 sync_type, u32 size, + u64 mc_addr) +{ + u32 cp_coher_size; + + if (size == 0xffffffff) + cp_coher_size = 0xffffffff; + else + cp_coher_size = ((size + 255) >> 8); + + radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); + radeon_ring_write(rdev, sync_type); + radeon_ring_write(rdev, cp_coher_size); + radeon_ring_write(rdev, mc_addr >> 8); + radeon_ring_write(rdev, 10); /* poll interval */ +} + +/* emits 11dw + 1 surface sync = 16dw */ +static void +set_shaders(struct radeon_device *rdev) +{ + u64 gpu_addr; + + /* VS */ + gpu_addr = 
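(Aside: in set_render_target() above, the pitch and slice fields are stored minus-one in units of 8 pixels and 64 pixels respectively, with the height padded to a multiple of 8 lines. A quick standalone check of that arithmetic; the 8192x3 surface is an arbitrary example:)

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* example blit target: 8192 x 3, height padded to the 8-line tile */
		int w = 8192, h = 3;

		h = ALIGN(h, 8);
		if (h < 8)
			h = 8;

		printf("pitch field: %d\n", (w / 8) - 1);	/* 1023 */
		printf("slice field: %d\n", ((w * h) / 64) - 1); /* 1023 */
		return 0;
	}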
rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3)); + radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, 2); + radeon_ring_write(rdev, 0); + + /* PS */ + gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4)); + radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, 1); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 2); + + gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; + cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); +} + +/* emits 10 + 1 sync (5) = 15 */ +static void +set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) +{ + u32 sq_vtx_constant_word2, sq_vtx_constant_word3; + + /* high addr, stride */ + sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); + /* xyzw swizzles */ + sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); + radeon_ring_write(rdev, 0x580); + radeon_ring_write(rdev, gpu_addr & 0xffffffff); + radeon_ring_write(rdev, 48 - 1); /* size */ + radeon_ring_write(rdev, sq_vtx_constant_word2); + radeon_ring_write(rdev, sq_vtx_constant_word3); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); + + if (rdev->family == CHIP_CEDAR) + cp_set_surface_sync(rdev, + PACKET3_TC_ACTION_ENA, 48, gpu_addr); + else + cp_set_surface_sync(rdev, + PACKET3_VC_ACTION_ENA, 48, gpu_addr); + +} + +/* emits 10 */ +static void +set_tex_resource(struct radeon_device *rdev, + int format, int w, int h, int pitch, + u64 gpu_addr) +{ + u32 sq_tex_resource_word0, sq_tex_resource_word1; + u32 sq_tex_resource_word4, sq_tex_resource_word7; + + if (h < 1) + h = 1; + + sq_tex_resource_word0 = (1 << 0); /* 2D */ + sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | + ((w - 1) << 18)); + sq_tex_resource_word1 = ((h - 1) << 0); + /* xyzw swizzles */ + sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); + + sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30); + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, sq_tex_resource_word0); + radeon_ring_write(rdev, sq_tex_resource_word1); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, gpu_addr >> 8); + radeon_ring_write(rdev, sq_tex_resource_word4); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, sq_tex_resource_word7); +} + +/* emits 12 */ +static void +set_scissors(struct radeon_device *rdev, int x1, int y1, + int x2, int y2) +{ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, (x1 << 0) | (y1 << 16)); + radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); + radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); + + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + 
radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); + radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); +} + +/* emits 10 */ +static void +draw_auto(struct radeon_device *rdev) +{ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2); + radeon_ring_write(rdev, DI_PT_RECTLIST); + + radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); + radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); + + radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); + radeon_ring_write(rdev, 1); + + radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); + radeon_ring_write(rdev, 3); + radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX); + +} + +/* emits 30 */ +static void +set_default_state(struct radeon_device *rdev) +{ + u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3; + u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2; + u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3; + int num_ps_gprs, num_vs_gprs, num_temp_gprs; + int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs; + int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; + int num_hs_threads, num_ls_threads; + int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; + int num_hs_stack_entries, num_ls_stack_entries; + + switch (rdev->family) { + case CHIP_CEDAR: + default: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 96; + num_vs_threads = 16; + num_gs_threads = 16; + num_es_threads = 16; + num_hs_threads = 16; + num_ls_threads = 16; + num_ps_stack_entries = 42; + num_vs_stack_entries = 42; + num_gs_stack_entries = 42; + num_es_stack_entries = 42; + num_hs_stack_entries = 42; + num_ls_stack_entries = 42; + break; + case CHIP_REDWOOD: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 20; + num_gs_threads = 20; + num_es_threads = 20; + num_hs_threads = 20; + num_ls_threads = 20; + num_ps_stack_entries = 42; + num_vs_stack_entries = 42; + num_gs_stack_entries = 42; + num_es_stack_entries = 42; + num_hs_stack_entries = 42; + num_ls_stack_entries = 42; + break; + case CHIP_JUNIPER: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 20; + num_gs_threads = 20; + num_es_threads = 20; + num_hs_threads = 20; + num_ls_threads = 20; + num_ps_stack_entries = 85; + num_vs_stack_entries = 85; + num_gs_stack_entries = 85; + num_es_stack_entries = 85; + num_hs_stack_entries = 85; + num_ls_stack_entries = 85; + break; + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 20; + num_gs_threads = 20; + num_es_threads = 20; + num_hs_threads = 20; + num_ls_threads = 20; + num_ps_stack_entries = 85; + num_vs_stack_entries = 85; + num_gs_stack_entries = 85; + num_es_stack_entries = 85; + num_hs_stack_entries = 85; + num_ls_stack_entries = 85; + break; + } + + if (rdev->family == CHIP_CEDAR) + sq_config = 0; + else + 
sq_config = VC_ENABLE; + + sq_config |= (EXPORT_SRC_C | + CS_PRIO(0) | + LS_PRIO(0) | + HS_PRIO(0) | + PS_PRIO(0) | + VS_PRIO(1) | + GS_PRIO(2) | + ES_PRIO(3)); + + sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | + NUM_VS_GPRS(num_vs_gprs) | + NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); + sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | + NUM_ES_GPRS(num_es_gprs)); + sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) | + NUM_LS_GPRS(num_ls_gprs)); + sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | + NUM_VS_THREADS(num_vs_threads) | + NUM_GS_THREADS(num_gs_threads) | + NUM_ES_THREADS(num_es_threads)); + sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) | + NUM_LS_THREADS(num_ls_threads)); + sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | + NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); + sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | + NUM_ES_STACK_ENTRIES(num_es_stack_entries)); + sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) | + NUM_LS_STACK_ENTRIES(num_ls_stack_entries)); + + /* set clear context state */ + radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); + radeon_ring_write(rdev, 0); + + /* disable dyn gprs */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2); + radeon_ring_write(rdev, 0); + + /* SQ config */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11)); + radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2); + radeon_ring_write(rdev, sq_config); + radeon_ring_write(rdev, sq_gpr_resource_mgmt_1); + radeon_ring_write(rdev, sq_gpr_resource_mgmt_2); + radeon_ring_write(rdev, sq_gpr_resource_mgmt_3); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, sq_thread_resource_mgmt); + radeon_ring_write(rdev, sq_thread_resource_mgmt_2); + radeon_ring_write(rdev, sq_stack_resource_mgmt_1); + radeon_ring_write(rdev, sq_stack_resource_mgmt_2); + radeon_ring_write(rdev, sq_stack_resource_mgmt_3); + + /* CONTEXT_CONTROL */ + radeon_ring_write(rdev, 0xc0012800); + radeon_ring_write(rdev, 0x80000000); + radeon_ring_write(rdev, 0x80000000); + + /* SQ_VTX_BASE_VTX_LOC */ + radeon_ring_write(rdev, 0xc0026f00); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + + /* SET_SAMPLER */ + radeon_ring_write(rdev, 0xc0036e00); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000012); + radeon_ring_write(rdev, 0x00000000); + radeon_ring_write(rdev, 0x00000000); + +} + +static inline uint32_t i2f(uint32_t input) +{ + u32 result, i, exponent, fraction; + + if ((input & 0x3fff) == 0) + result = 0; /* 0 is a special case */ + else { + exponent = 140; /* exponent biased by 127; */ + fraction = (input & 0x3fff) << 10; /* cheat and only + handle numbers below 2^^15 */ + for (i = 0; i < 14; i++) { + if (fraction & 0x800000) + break; + else { + fraction = fraction << 1; /* keep + shifting left until top bit = 1 */ + exponent = exponent - 1; + } + } + result = exponent << 23 | (fraction & 0x7fffff); /* mask + off top bit; assumed 1 */ + } + return result; +} + +int evergreen_blit_init(struct radeon_device *rdev) +{ + u32 obj_size; + int r; + void *ptr; + + /* pin copy shader into vram if already initialized */ + if (rdev->r600_blit.shader_obj) + goto done; + + mutex_init(&rdev->r600_blit.mutex); + rdev->r600_blit.state_offset = 0; + rdev->r600_blit.state_len = 
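(Aside: i2f() above hand-builds IEEE-754 single-precision bit patterns for the vertex coordinates, since kernel code cannot use the FPU. A userspace sanity check of the same routine against the compiler's float conversion, valid only for the sub-2^14 inputs the helper supports:)

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	static uint32_t i2f(uint32_t input)	/* condensed copy of the helper above */
	{
		uint32_t i, exponent = 140, fraction = (input & 0x3fff) << 10;

		if ((input & 0x3fff) == 0)
			return 0;
		for (i = 0; i < 14 && !(fraction & 0x800000); i++) {
			fraction <<= 1;	/* shift left until the top bit is set */
			exponent--;
		}
		return exponent << 23 | (fraction & 0x7fffff);
	}

	int main(void)
	{
		uint32_t in = 4096, bits;
		float f = (float)in;

		memcpy(&bits, &f, sizeof(bits));
		printf("i2f(%u) = 0x%08x, FPU gives 0x%08x\n", in, i2f(in), bits);
		return 0;
	}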
0; + obj_size = 0; + + rdev->r600_blit.vs_offset = obj_size; + obj_size += evergreen_vs_size * 4; + obj_size = ALIGN(obj_size, 256); + + rdev->r600_blit.ps_offset = obj_size; + obj_size += evergreen_ps_size * 4; + obj_size = ALIGN(obj_size, 256); + + r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_obj); + if (r) { + DRM_ERROR("evergreen failed to allocate shader\n"); + return r; + } + + DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n", + obj_size, + rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); + + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); + if (r) { + DRM_ERROR("failed to map blit object %d\n", r); + return r; + } + + memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); + memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); + radeon_bo_kunmap(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + +done: + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + dev_err(rdev->dev, "(%d) pin blit object failed\n", r); + return r; + } + return 0; +} + +void evergreen_blit_fini(struct radeon_device *rdev) +{ + int r; + + if (rdev->r600_blit.shader_obj == NULL) + return; + /* If we can't reserve the bo, unref should be enough to destroy + * it when it becomes idle. + */ + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (!r) { + radeon_bo_unpin(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + } + radeon_bo_unref(&rdev->r600_blit.shader_obj); +} + +static int evergreen_vb_ib_get(struct radeon_device *rdev) +{ + int r; + r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); + if (r) { + DRM_ERROR("failed to get IB for vertex buffer\n"); + return r; + } + + rdev->r600_blit.vb_total = 64*1024; + rdev->r600_blit.vb_used = 0; + return 0; +} + +static void evergreen_vb_ib_put(struct radeon_device *rdev) +{ + radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); + radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); +} + +int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) +{ + int r; + int ring_size, line_size; + int max_size; + /* loops of emits + fence emit possible */ + int dwords_per_loop = 74, num_loops; + + r = evergreen_vb_ib_get(rdev); + if (r) + return r; + + /* 8 bpp vs 32 bpp for xfer unit */ + if (size_bytes & 3) + line_size = 8192; + else + line_size = 8192 * 4; + + max_size = 8192 * line_size; + + /* major loops cover the max size transfer */ + num_loops = ((size_bytes + max_size) / max_size); + /* minor loops cover the extra non aligned bits */ + num_loops += ((size_bytes % line_size) ? 
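(Aside: the shader BO layout computed in evergreen_blit_init() above packs the VS and then the PS at 256-byte-aligned offsets. Using the array sizes from evergreen_blit_shaders.c further down, 16 and 20 dwords, the result is a 512-byte object:)

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* dword counts of evergreen_vs[] and evergreen_ps[] below */
		unsigned vs_size = 16, ps_size = 20;
		unsigned obj_size = 0, vs_offset, ps_offset;

		vs_offset = obj_size;
		obj_size = ALIGN(obj_size + vs_size * 4, 256);
		ps_offset = obj_size;
		obj_size = ALIGN(obj_size + ps_size * 4, 256);

		printf("vs at %u, ps at %u, bo size %u\n",
		       vs_offset, ps_offset, obj_size);	/* 0, 256, 512 */
		return 0;
	}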
1 : 0); + /* calculate number of loops correctly */ + ring_size = num_loops * dwords_per_loop; + /* set default + shaders */ + ring_size += 46; /* shaders + def state */ + ring_size += 10; /* fence emit for VB IB */ + ring_size += 5; /* done copy */ + ring_size += 10; /* fence emit for done copy */ + r = radeon_ring_lock(rdev, ring_size); + if (r) + return r; + + set_default_state(rdev); /* 30 */ + set_shaders(rdev); /* 16 */ + return 0; +} + +void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) +{ + int r; + + if (rdev->r600_blit.vb_ib) + evergreen_vb_ib_put(rdev); + + if (fence) + r = radeon_fence_emit(rdev, fence); + + radeon_ring_unlock_commit(rdev); +} + +void evergreen_kms_blit_copy(struct radeon_device *rdev, + u64 src_gpu_addr, u64 dst_gpu_addr, + int size_bytes) +{ + int max_bytes; + u64 vb_gpu_addr; + u32 *vb; + + DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, + size_bytes, rdev->r600_blit.vb_used); + vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); + if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { + max_bytes = 8192; + + while (size_bytes) { + int cur_size = size_bytes; + int src_x = src_gpu_addr & 255; + int dst_x = dst_gpu_addr & 255; + int h = 1; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; + + if (!src_x && !dst_x) { + h = (cur_size / max_bytes); + if (h > 8192) + h = 8192; + if (h == 0) + h = 1; + else + cur_size = max_bytes; + } else { + if (cur_size > max_bytes) + cur_size = max_bytes; + if (cur_size > (max_bytes - dst_x)) + cur_size = (max_bytes - dst_x); + if (cur_size > (max_bytes - src_x)) + cur_size = (max_bytes - src_x); + } + + if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { + WARN_ON(1); + } + + vb[0] = i2f(dst_x); + vb[1] = 0; + vb[2] = i2f(src_x); + vb[3] = 0; + + vb[4] = i2f(dst_x); + vb[5] = i2f(h); + vb[6] = i2f(src_x); + vb[7] = i2f(h); + + vb[8] = i2f(dst_x + cur_size); + vb[9] = i2f(h); + vb[10] = i2f(src_x + cur_size); + vb[11] = i2f(h); + + /* src 10 */ + set_tex_resource(rdev, FMT_8, + src_x + cur_size, h, src_x + cur_size, + src_gpu_addr); + + /* 5 */ + cp_set_surface_sync(rdev, + PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); + + + /* dst 17 */ + set_render_target(rdev, COLOR_8, + dst_x + cur_size, h, + dst_gpu_addr); + + /* scissors 12 */ + set_scissors(rdev, dst_x, 0, dst_x + cur_size, h); + + /* 15 */ + vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; + set_vtx_resource(rdev, vb_gpu_addr); + + /* draw 10 */ + draw_auto(rdev); + + /* 5 */ + cp_set_surface_sync(rdev, + PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, + cur_size * h, dst_gpu_addr); + + vb += 12; + rdev->r600_blit.vb_used += 12 * 4; + + src_gpu_addr += cur_size * h; + dst_gpu_addr += cur_size * h; + size_bytes -= cur_size * h; + } + } else { + max_bytes = 8192 * 4; + + while (size_bytes) { + int cur_size = size_bytes; + int src_x = (src_gpu_addr & 255); + int dst_x = (dst_gpu_addr & 255); + int h = 1; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; + + if (!src_x && !dst_x) { + h = (cur_size / max_bytes); + if (h > 8192) + h = 8192; + if (h == 0) + h = 1; + else + cur_size = max_bytes; + } else { + if (cur_size > max_bytes) + cur_size = max_bytes; + if (cur_size > (max_bytes - dst_x)) + cur_size = (max_bytes - dst_x); + if (cur_size > (max_bytes - src_x)) + cur_size = (max_bytes - src_x); + } + + if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) 
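(Aside: the carving logic above is easier to see with numbers. For a fully aligned transfer the fast case collapses each pass into one max_bytes-wide rectangle of h rows; a sketch with a sample 1 MiB copy through the 32bpp path:)

	#include <stdio.h>

	int main(void)
	{
		/* 4-byte-aligned 1 MiB copy: the 32bpp path, max_bytes = 8192 * 4 */
		int size_bytes = 1 << 20, max_bytes = 8192 * 4;
		int src_x = 0, dst_x = 0, h = 1, cur_size = size_bytes;

		if (!src_x && !dst_x) {			/* both addresses 256-byte aligned */
			h = cur_size / max_bytes;	/* 32 rows */
			if (h > 8192)
				h = 8192;
			if (h == 0)
				h = 1;
			else
				cur_size = max_bytes;	/* one 8192-pixel-wide rectangle */
		}

		printf("one pass covers %d bytes as %d rows of %d bytes\n",
		       h * cur_size, h, cur_size);	/* the whole 1 MiB in one draw */
		return 0;
	}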
{ + WARN_ON(1); + } + + vb[0] = i2f(dst_x / 4); + vb[1] = 0; + vb[2] = i2f(src_x / 4); + vb[3] = 0; + + vb[4] = i2f(dst_x / 4); + vb[5] = i2f(h); + vb[6] = i2f(src_x / 4); + vb[7] = i2f(h); + + vb[8] = i2f((dst_x + cur_size) / 4); + vb[9] = i2f(h); + vb[10] = i2f((src_x + cur_size) / 4); + vb[11] = i2f(h); + + /* src 10 */ + set_tex_resource(rdev, FMT_8_8_8_8, + (src_x + cur_size) / 4, + h, (src_x + cur_size) / 4, + src_gpu_addr); + /* 5 */ + cp_set_surface_sync(rdev, + PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); + + /* dst 17 */ + set_render_target(rdev, COLOR_8_8_8_8, + (dst_x + cur_size) / 4, h, + dst_gpu_addr); + + /* scissors 12 */ + set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h); + + /* Vertex buffer setup 15 */ + vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; + set_vtx_resource(rdev, vb_gpu_addr); + + /* draw 10 */ + draw_auto(rdev); + + /* 5 */ + cp_set_surface_sync(rdev, + PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, + cur_size * h, dst_gpu_addr); + + /* 74 ring dwords per loop */ + vb += 12; + rdev->r600_blit.vb_used += 12 * 4; + + src_gpu_addr += cur_size * h; + dst_gpu_addr += cur_size * h; + size_bytes -= cur_size * h; + } + } +} + diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c new file mode 100644 index 000000000000..ef1d28c07fbf --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c @@ -0,0 +1,348 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Alex Deucher + */ + +#include +#include + +/* + * evergreen cards need to use the 3D engine to blit data which requires + * quite a bit of hw state setup. Rather than pull the whole 3D driver + * (which normally generates the 3D state) into the DRM, we opt to use + * statically generated state tables. The register state and shaders + * were hand generated to support blitting functionality. See the 3D + * driver or documentation for descriptions of the registers and + * shader instructions.
+ */ + +const u32 evergreen_default_state[] = +{ + 0xc0016900, + 0x0000023b, + 0x00000000, /* SQ_LDS_ALLOC_PS */ + + 0xc0066900, + 0x00000240, + 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + + 0xc0046900, + 0x00000247, + 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ + 0x00000000, + 0x00000000, + 0x00000000, + + 0xc0026900, + 0x00000010, + 0x00000000, /* DB_Z_INFO */ + 0x00000000, /* DB_STENCIL_INFO */ + + 0xc0016900, + 0x00000200, + 0x00000000, /* DB_DEPTH_CONTROL */ + + 0xc0066900, + 0x00000000, + 0x00000060, /* DB_RENDER_CONTROL */ + 0x00000000, /* DB_COUNT_CONTROL */ + 0x00000000, /* DB_DEPTH_VIEW */ + 0x0000002a, /* DB_RENDER_OVERRIDE */ + 0x00000000, /* DB_RENDER_OVERRIDE2 */ + 0x00000000, /* DB_HTILE_DATA_BASE */ + + 0xc0026900, + 0x0000000a, + 0x00000000, /* DB_STENCIL_CLEAR */ + 0x00000000, /* DB_DEPTH_CLEAR */ + + 0xc0016900, + 0x000002dc, + 0x0000aa00, /* DB_ALPHA_TO_MASK */ + + 0xc0016900, + 0x00000080, + 0x00000000, /* PA_SC_WINDOW_OFFSET */ + + 0xc00d6900, + 0x00000083, + 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ + 0x00000000, /* PA_SC_CLIPRECT_0_TL */ + 0x20002000, /* PA_SC_CLIPRECT_0_BR */ + 0x00000000, + 0x20002000, + 0x00000000, + 0x20002000, + 0x00000000, + 0x20002000, + 0xaaaaaaaa, /* PA_SC_EDGERULE */ + 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ + 0x0000000f, /* CB_TARGET_MASK */ + 0x0000000f, /* CB_SHADER_MASK */ + + 0xc0226900, + 0x00000094, + 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ + 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x80000000, + 0x20002000, + 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ + 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ + + 0xc0016900, + 0x000000d4, + 0x00000000, /* SX_MISC */ + + 0xc0026900, + 0x00000292, + 0x00000000, /* PA_SC_MODE_CNTL_0 */ + 0x00000000, /* PA_SC_MODE_CNTL_1 */ + + 0xc0106900, + 0x00000300, + 0x00000000, /* PA_SC_LINE_CNTL */ + 0x00000000, /* PA_SC_AA_CONFIG */ + 0x00000005, /* PA_SU_VTX_CNTL */ + 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ + 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ + 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ + 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ + 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */ + 0xffffffff, /* PA_SC_AA_MASK */ + + 0xc00d6900, + 0x00000202, + 0x00cc0010, /* CB_COLOR_CONTROL */ + 0x00000210, /* DB_SHADER_CONTROL */ + 0x00010000, /* PA_CL_CLIP_CNTL */ + 0x00000004, /* PA_SU_SC_MODE_CNTL */ + 0x00000100, /* PA_CL_VTE_CNTL */ + 0x00000000, /* PA_CL_VS_OUT_CNTL */ + 0x00000000, /* PA_CL_NANINF_CNTL */ + 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ + 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ + 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */ + + 0xc0066900, + 0x000002de, + 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + + 0xc0016900, + 0x00000229, + 0x00000000, /* SQ_PGM_START_FS */ + + 0xc0016900, + 0x0000022a, + 0x00000000, /* 
SQ_PGM_RESOURCES_FS */ + + 0xc0096900, + 0x00000100, + 0x00ffffff, /* VGT_MAX_VTX_INDX */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* SX_ALPHA_TEST_CONTROL */ + 0x00000000, /* CB_BLEND_RED */ + 0x00000000, /* CB_BLEND_GREEN */ + 0x00000000, /* CB_BLEND_BLUE */ + 0x00000000, /* CB_BLEND_ALPHA */ + + 0xc0026900, + 0x000002a8, + 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ + 0x00000000, /* */ + + 0xc0026900, + 0x000002ad, + 0x00000000, /* VGT_REUSE_OFF */ + 0x00000000, /* */ + + 0xc0116900, + 0x00000280, + 0x00000000, /* PA_SU_POINT_SIZE */ + 0x00000000, /* PA_SU_POINT_MINMAX */ + 0x00000008, /* PA_SU_LINE_CNTL */ + 0x00000000, /* PA_SC_LINE_STIPPLE */ + 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ + 0x00000000, /* VGT_HOS_CNTL */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* VGT_GS_MODE */ + + 0xc0016900, + 0x000002a1, + 0x00000000, /* VGT_PRIMITIVEID_EN */ + + 0xc0016900, + 0x000002a5, + 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ + + 0xc0016900, + 0x000002d5, + 0x00000000, /* VGT_SHADER_STAGES_EN */ + + 0xc0026900, + 0x000002e5, + 0x00000000, /* VGT_STRMOUT_CONFIG */ + 0x00000000, /* */ + + 0xc0016900, + 0x000001e0, + 0x00000000, /* CB_BLEND0_CONTROL */ + + 0xc0016900, + 0x000001b1, + 0x00000000, /* SPI_VS_OUT_CONFIG */ + + 0xc0016900, + 0x00000187, + 0x00000000, /* SPI_VS_OUT_ID_0 */ + + 0xc0016900, + 0x00000191, + 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ + + 0xc00b6900, + 0x000001b3, + 0x20000001, /* SPI_PS_IN_CONTROL_0 */ + 0x00000000, /* SPI_PS_IN_CONTROL_1 */ + 0x00000000, /* SPI_INTERP_CONTROL_0 */ + 0x00000000, /* SPI_INPUT_Z */ + 0x00000000, /* SPI_FOG_CNTL */ + 0x00100000, /* SPI_BARYC_CNTL */ + 0x00000000, /* SPI_PS_IN_CONTROL_2 */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + 0x00000000, /* */ + + 0xc0026900, + 0x00000316, + 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ + 0x00000010, /* */ +}; + +const u32 evergreen_vs[] = +{ + 0x00000004, + 0x80800400, + 0x0000a03c, + 0x95000688, + 0x00004000, + 0x15200688, + 0x00000000, + 0x00000000, + 0x3c000000, + 0x67961001, + 0x00080000, + 0x00000000, + 0x1c000000, + 0x67961000, + 0x00000008, + 0x00000000, +}; + +const u32 evergreen_ps[] = +{ + 0x00000003, + 0xa00c0000, + 0x00000008, + 0x80400000, + 0x00000000, + 0x95200688, + 0x00380400, + 0x00146b10, + 0x00380000, + 0x20146b10, + 0x00380400, + 0x40146b00, + 0x80380000, + 0x60146b00, + 0x00000000, + 0x00000000, + 0x00000010, + 0x000d1000, + 0xb0800000, + 0x00000000, +}; + +const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps); +const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs); +const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h new file mode 100644 index 000000000000..bb8d6c751595 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h @@ -0,0 +1,35 @@ +/* + * Copyright 2009 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef EVERGREEN_BLIT_SHADERS_H +#define EVERGREEN_BLIT_SHADERS_H + +extern const u32 evergreen_ps[]; +extern const u32 evergreen_vs[]; +extern const u32 evergreen_default_state[]; + +extern const u32 evergreen_ps_size, evergreen_vs_size; +extern const u32 evergreen_default_size; + +#endif diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 9b7532dd30f7..113c70cc8b39 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -412,6 +412,19 @@ #define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) +/* display watermarks */ +#define DC_LB_MEMORY_SPLIT 0x6b0c +#define PRIORITY_A_CNT 0x6b18 +#define PRIORITY_MARK_MASK 0x7fff +#define PRIORITY_OFF (1 << 16) +#define PRIORITY_ALWAYS_ON (1 << 20) +#define PRIORITY_B_CNT 0x6b1c +#define PIPE0_ARBITRATION_CONTROL3 0x0bf0 +# define LATENCY_WATERMARK_MASK(x) ((x) << 16) +#define PIPE0_LATENCY_CONTROL 0x0bf4 +# define LATENCY_LOW_WATERMARK(x) ((x) << 0) +# define LATENCY_HIGH_WATERMARK(x) ((x) << 16) + #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ @@ -645,6 +658,8 @@ #define PACKET3_EVENT_WRITE_EOP 0x47 #define PACKET3_EVENT_WRITE_EOS 0x48 #define PACKET3_PREAMBLE_CNTL 0x4A +# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) +# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) #define PACKET3_RB_OFFSET 0x4B #define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C #define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D @@ -802,6 +817,11 @@ #define SQ_ALU_CONST_CACHE_LS_14 0x28f78 #define SQ_ALU_CONST_CACHE_LS_15 0x28f7c +#define PA_SC_SCREEN_SCISSOR_TL 0x28030 +#define PA_SC_GENERIC_SCISSOR_TL 0x28240 +#define PA_SC_WINDOW_SCISSOR_TL 0x28204 +#define VGT_PRIMITIVE_TYPE 0x8958 + #define DB_DEPTH_CONTROL 0x28800 #define DB_DEPTH_VIEW 0x28008 #define DB_HTILE_DATA_BASE 0x28014 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e59422320bb6..6d1540c0bfed 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -675,67 +675,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, RADEON_SW_INT_FIRE); } -int r100_wb_init(struct radeon_device *rdev) -{ - int r; - - if (rdev->wb.wb_obj == NULL) { - r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, - &rdev->wb.wb_obj); - if (r) { - 
dev_err(rdev->dev, "(%d) create WB buffer failed\n", r); - return r; - } - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); - if (r) { - dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r); - radeon_bo_unreserve(rdev->wb.wb_obj); - return r; - } - r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); - radeon_bo_unreserve(rdev->wb.wb_obj); - if (r) { - dev_err(rdev->dev, "(%d) map WB buffer failed\n", r); - return r; - } - } - WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr); - WREG32(R_00070C_CP_RB_RPTR_ADDR, - S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2)); - WREG32(R_000770_SCRATCH_UMSK, 0xff); - return 0; -} - -void r100_wb_disable(struct radeon_device *rdev) -{ - WREG32(R_000770_SCRATCH_UMSK, 0); -} - -void r100_wb_fini(struct radeon_device *rdev) -{ - int r; - - r100_wb_disable(rdev); - if (rdev->wb.wb_obj) { - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) { - dev_err(rdev->dev, "(%d) can't finish WB\n", r); - return; - } - radeon_bo_kunmap(rdev->wb.wb_obj); - radeon_bo_unpin(rdev->wb.wb_obj); - radeon_bo_unreserve(rdev->wb.wb_obj); - radeon_bo_unref(&rdev->wb.wb_obj); - rdev->wb.wb = NULL; - rdev->wb.wb_obj = NULL; - } -} - int r100_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, @@ -996,20 +935,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) WREG32(0x718, pre_write_timer | (pre_write_limit << 28)); tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) | REG_SET(RADEON_RB_BLKSZ, rb_blksz) | - REG_SET(RADEON_MAX_FETCH, max_fetch) | - RADEON_RB_NO_UPDATE); + REG_SET(RADEON_MAX_FETCH, max_fetch)); #ifdef __BIG_ENDIAN tmp |= RADEON_BUF_SWAP_32BIT; #endif - WREG32(RADEON_CP_RB_CNTL, tmp); + WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); /* Set ring address */ DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr); WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr); /* Force read & write ptr to 0 */ - WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); + WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); WREG32(RADEON_CP_RB_RPTR_WR, 0); WREG32(RADEON_CP_RB_WPTR, 0); + + /* set the wb address whether it's enabled or not */ + WREG32(R_00070C_CP_RB_RPTR_ADDR, + S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2)); + WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET); + + if (rdev->wb.enabled) + WREG32(R_000770_SCRATCH_UMSK, 0xff); + else { + tmp |= RADEON_RB_NO_UPDATE; + WREG32(R_000770_SCRATCH_UMSK, 0); + } + WREG32(RADEON_CP_RB_CNTL, tmp); udelay(10); rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); @@ -1052,6 +1003,7 @@ void r100_cp_disable(struct radeon_device *rdev) rdev->cp.ready = false; WREG32(RADEON_CP_CSQ_MODE, 0); WREG32(RADEON_CP_CSQ_CNTL, 0); + WREG32(R_000770_SCRATCH_UMSK, 0); if (r100_gui_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait GUI idle while " "programming pipes. Bad things might happen.\n"); @@ -2318,6 +2270,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev) /* Fix for RN50, M6, M7 with 8/16/32(??) 
MBs of VRAM - * Novell bug 204882 + along with lots of ubuntu ones */ + if (rdev->mc.aper_size > config_aper_size) + config_aper_size = rdev->mc.aper_size; + if (config_aper_size > rdev->mc.real_vram_size) rdev->mc.mc_vram_size = config_aper_size; else @@ -3737,6 +3692,12 @@ static int r100_startup(struct radeon_device *rdev) if (r) return r; } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -3746,9 +3707,6 @@ static int r100_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -3782,7 +3740,7 @@ int r100_resume(struct radeon_device *rdev) int r100_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCI) r100_pci_gart_disable(rdev); @@ -3792,7 +3750,7 @@ int r100_suspend(struct radeon_device *rdev) void r100_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCI) @@ -3905,7 +3863,7 @@ int r100_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCI) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index c827738ad7dd..34527e600fe9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1332,6 +1332,12 @@ static int r300_startup(struct radeon_device *rdev) if (r) return r; } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -1341,9 +1347,6 @@ static int r300_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -1379,7 +1382,7 @@ int r300_resume(struct radeon_device *rdev) int r300_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); @@ -1391,7 +1394,7 @@ int r300_suspend(struct radeon_device *rdev) void r300_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) @@ -1484,7 +1487,7 @@ int r300_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 59f7bccc5be0..c387346f93a9 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -248,6 +248,12 @@ 
static int r420_startup(struct radeon_device *rdev) return r; } r420_pipes_init(rdev); + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -258,10 +264,6 @@ static int r420_startup(struct radeon_device *rdev) return r; } r420_cp_errata_init(rdev); - r = r100_wb_init(rdev); - if (r) { - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); - } r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -302,7 +304,7 @@ int r420_suspend(struct radeon_device *rdev) { r420_cp_errata_fini(rdev); r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); @@ -314,7 +316,7 @@ int r420_suspend(struct radeon_device *rdev) void r420_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) @@ -418,7 +420,7 @@ int r420_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); if (rdev->flags & RADEON_IS_PCIE) diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 1458dee902dd..3c8677f9e385 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -181,6 +181,12 @@ static int r520_startup(struct radeon_device *rdev) if (r) return r; } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -190,9 +196,6 @@ static int r520_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -295,7 +298,7 @@ int r520_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 7b65e4efe8af..33952a12f0a3 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1608,8 +1608,11 @@ void r600_gpu_init(struct radeon_device *rdev) rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); - tiling_config |= GROUP_SIZE(0); - rdev->config.r600.tiling_group_size = 256; + tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); + if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) + rdev->config.r600.tiling_group_size = 512; + else + rdev->config.r600.tiling_group_size = 256; tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; if (tmp > 3) { tiling_config |= ROW_TILING(3); @@ -1920,6 +1923,7 @@ void r600_cp_stop(struct radeon_device *rdev) { rdev->mc.active_vram_size = rdev->mc.visible_vram_size; WREG32(R_0086D8_CP_ME_CNTL, 
S_0086D8_CP_ME_HALT(1)); + WREG32(SCRATCH_UMSK, 0); } int r600_init_microcode(struct radeon_device *rdev) @@ -2152,7 +2156,7 @@ int r600_cp_resume(struct radeon_device *rdev) /* Set ring buffer size */ rb_bufsz = drm_order(rdev->cp.ring_size / 8); - tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; #ifdef __BIG_ENDIAN tmp |= BUF_SWAP_32BIT; #endif @@ -2166,8 +2170,19 @@ int r600_cp_resume(struct radeon_device *rdev) WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); WREG32(CP_RB_RPTR_WR, 0); WREG32(CP_RB_WPTR, 0); - WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); - WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); + + /* set the wb address whether it's enabled or not */ + WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); + WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); + WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); + + if (rdev->wb.enabled) + WREG32(SCRATCH_UMSK, 0xff); + else { + tmp |= RB_NO_UPDATE; + WREG32(SCRATCH_UMSK, 0); + } + mdelay(1); WREG32(CP_RB_CNTL, tmp); @@ -2219,9 +2234,10 @@ void r600_scratch_init(struct radeon_device *rdev) int i; rdev->scratch.num_reg = 7; + rdev->scratch.reg_base = SCRATCH_REG0; for (i = 0; i < rdev->scratch.num_reg; i++) { rdev->scratch.free[i] = true; - rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4); + rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); } } @@ -2265,88 +2281,34 @@ int r600_ring_test(struct radeon_device *rdev) return r; } -void r600_wb_disable(struct radeon_device *rdev) -{ - int r; - - WREG32(SCRATCH_UMSK, 0); - if (rdev->wb.wb_obj) { - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) - return; - radeon_bo_kunmap(rdev->wb.wb_obj); - radeon_bo_unpin(rdev->wb.wb_obj); - radeon_bo_unreserve(rdev->wb.wb_obj); - } -} - -void r600_wb_fini(struct radeon_device *rdev) -{ - r600_wb_disable(rdev); - if (rdev->wb.wb_obj) { - radeon_bo_unref(&rdev->wb.wb_obj); - rdev->wb.wb = NULL; - rdev->wb.wb_obj = NULL; - } -} - -int r600_wb_enable(struct radeon_device *rdev) -{ - int r; - - if (rdev->wb.wb_obj == NULL) { - r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); - if (r) { - dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); - return r; - } - r = radeon_bo_reserve(rdev->wb.wb_obj, false); - if (unlikely(r != 0)) { - r600_wb_fini(rdev); - return r; - } - r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); - if (r) { - radeon_bo_unreserve(rdev->wb.wb_obj); - dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); - r600_wb_fini(rdev); - return r; - } - r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); - radeon_bo_unreserve(rdev->wb.wb_obj); - if (r) { - dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); - r600_wb_fini(rdev); - return r; - } - } - WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF); - WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC); - WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF); - WREG32(SCRATCH_UMSK, 0xff); - return 0; -} - void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { - /* Also consider EVENT_WRITE_EOP. 
it handles the interrupts + timestamps + events */ - - radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); - radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); - /* wait for 3D idle clean */ - radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); - radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); - /* Emit fence sequence & fire IRQ */ - radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); - radeon_ring_write(rdev, fence->seq); - /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ - radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); - radeon_ring_write(rdev, RB_INT_STAT); + if (rdev->wb.use_event) { + u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + + (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); + /* EVENT_WRITE_EOP - flush caches, send int */ + radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); + radeon_ring_write(rdev, addr & 0xffffffff); + radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); + radeon_ring_write(rdev, fence->seq); + radeon_ring_write(rdev, 0); + } else { + radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); + radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); + /* wait for 3D idle clean */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); + radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); + /* Emit fence sequence & fire IRQ */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); + radeon_ring_write(rdev, fence->seq); + /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ + radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); + radeon_ring_write(rdev, RB_INT_STAT); + } } int r600_copy_blit(struct radeon_device *rdev, @@ -2428,19 +2390,12 @@ int r600_startup(struct radeon_device *rdev) rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } - /* pin copy shader into vram */ - if (rdev->r600_blit.shader_obj) { - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - dev_err(rdev->dev, "(%d) pin blit object failed\n", r); - return r; - } - } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -2459,8 +2414,7 @@ int r600_startup(struct radeon_device *rdev) r = r600_cp_resume(rdev); if (r) return r; - /* write back buffer are not vital so don't worry about failure */ - r600_wb_enable(rdev); + return 0; } @@ -2519,7 +2473,7 @@ int r600_suspend(struct radeon_device *rdev) r600_cp_stop(rdev); rdev->cp.ready = false; r600_irq_suspend(rdev); - r600_wb_disable(rdev); + radeon_wb_disable(rdev); r600_pcie_gart_disable(rdev); /* unpin shaders bo */ if (rdev->r600_blit.shader_obj) { @@ -2616,8 +2570,8 @@ int r600_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r600_cp_fini(rdev); - 
r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -2647,8 +2601,8 @@ void r600_fini(struct radeon_device *rdev) r600_audio_fini(rdev); r600_blit_fini(rdev); r600_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); r600_pcie_gart_fini(rdev); radeon_agp_fini(rdev); @@ -2983,10 +2937,13 @@ int r600_irq_init(struct radeon_device *rdev) ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | IH_WPTR_OVERFLOW_CLEAR | (rb_bufsz << 1)); - /* WPTR writeback, not yet */ - /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/ - WREG32(IH_RB_WPTR_ADDR_LO, 0); - WREG32(IH_RB_WPTR_ADDR_HI, 0); + + if (rdev->wb.enabled) + ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; + + /* set the writeback address whether it's enabled or not */ + WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); + WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); WREG32(IH_RB_CNTL, ih_rb_cntl); @@ -3070,6 +3027,7 @@ int r600_irq_set(struct radeon_device *rdev) if (rdev->irq.sw_int) { DRM_DEBUG("r600_irq_set: sw int\n"); cp_int_cntl |= RB_INT_ENABLE; + cp_int_cntl |= TIME_STAMP_INT_ENABLE; } if (rdev->irq.crtc_vblank_int[0]) { DRM_DEBUG("r600_irq_set: vblank 0\n"); @@ -3244,8 +3202,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) { u32 wptr, tmp; - /* XXX use writeback */ - wptr = RREG32(IH_RB_WPTR); + if (rdev->wb.enabled) + wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; + else + wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { /* When a ring buffer overflow happen start parsing interrupt @@ -3433,6 +3393,7 @@ restart_ih: break; case 181: /* CP EOP event */ DRM_DEBUG("IH: CP EOP\n"); + radeon_fence_process(rdev); break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: CP EOP\n"); diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 3473c00781ff..8362974ef41a 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -472,9 +472,10 @@ int r600_blit_init(struct radeon_device *rdev) u32 packet2s[16]; int num_packet2s = 0; - /* don't reinitialize blit */ + /* pin copy shader into vram if already initialized */ if (rdev->r600_blit.shader_obj) - return 0; + goto done; + mutex_init(&rdev->r600_blit.mutex); rdev->r600_blit.state_offset = 0; @@ -532,6 +533,18 @@ int r600_blit_init(struct radeon_device *rdev) memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); + +done: + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + dev_err(rdev->dev, "(%d) pin blit object failed\n", r); + return r; + } rdev->mc.active_vram_size = rdev->mc.real_vram_size; return 0; } @@ -554,7 +567,7 @@ void r600_blit_fini(struct radeon_device *rdev) radeon_bo_unref(&rdev->r600_blit.shader_obj); } -int r600_vb_ib_get(struct radeon_device *rdev) +static int r600_vb_ib_get(struct radeon_device *rdev) { int r; r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib); @@ -568,7 +581,7 @@ int r600_vb_ib_get(struct radeon_device *rdev) return 0; } -void r600_vb_ib_put(struct radeon_device *rdev) +static void r600_vb_ib_put(struct radeon_device *rdev) { 
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); @@ -650,8 +663,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, int src_x = src_gpu_addr & 255; int dst_x = dst_gpu_addr & 255; int h = 1; - src_gpu_addr = src_gpu_addr & ~255; - dst_gpu_addr = dst_gpu_addr & ~255; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); @@ -672,17 +685,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { WARN_ON(1); - -#if 0 - r600_vb_ib_put(rdev); - - r600_nomm_put_vb(dev); - r600_nomm_get_vb(dev); - if (!dev_priv->blit_vb) - return; - set_shaders(dev); - vb = r600_nomm_get_vb_ptr(dev); -#endif } vb[0] = i2f(dst_x); @@ -744,8 +746,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, int src_x = (src_gpu_addr & 255); int dst_x = (dst_gpu_addr & 255); int h = 1; - src_gpu_addr = src_gpu_addr & ~255; - dst_gpu_addr = dst_gpu_addr & ~255; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); @@ -767,17 +769,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { WARN_ON(1); } -#if 0 - if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) { - r600_nomm_put_vb(dev); - r600_nomm_get_vb(dev); - if (!rdev->blit_vb) - return; - - set_shaders(dev); - vb = r600_nomm_get_vb_ptr(dev); - } -#endif vb[0] = i2f(dst_x / 4); vb[1] = 0; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 250a3a918193..7b294c127c5f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -170,6 +170,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) struct r600_cs_track *track = p->track; u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; volatile u32 *ib = p->ib->ptr; + unsigned array_mode; if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); @@ -185,12 +186,12 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) /* pitch is the number of 8x8 tiles per row */ pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; - height = size / (pitch * 8 * bpe); + slice_tile_max *= 64; + height = slice_tile_max / (pitch * 8); if (height > 8192) height = 8192; - if (height > 7) - height &= ~0x7; - switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) { + array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); + switch (array_mode) { case V_0280A0_ARRAY_LINEAR_GENERAL: /* technically height & 0x7 */ break; @@ -214,6 +215,9 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) __func__, __LINE__, pitch); return -EINVAL; } + /* avoid breaking userspace */ + if (height > 7) + height &= ~0x7; if (!IS_ALIGNED(height, 8)) { dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", __func__, __LINE__, height); @@ -222,13 +226,13 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) break; case V_0280A0_ARRAY_2D_TILED_THIN1: pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)); + (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8; if (!IS_ALIGNED(pitch, pitch_align)) { 
dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", __func__, __LINE__, pitch); return -EINVAL; } - if (!IS_ALIGNED((height / 8), track->nbanks)) { + if (!IS_ALIGNED((height / 8), track->npipes)) { dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", __func__, __LINE__, height); return -EINVAL; @@ -243,8 +247,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) /* check offset */ tmp = height * pitch * 8 * bpe; if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { - dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]); - return -EINVAL; + if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { + /* the initial DDX does bad things with the CB size occasionally */ + /* it rounds up height too far for slice tile max but the BO is smaller */ + tmp = (height - 7) * 8 * bpe; + if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { + dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); + return -EINVAL; + } + } else { + dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); + return -EINVAL; + } } if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) { dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]); @@ -361,13 +375,13 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) break; case V_028010_ARRAY_2D_TILED_THIN1: pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / bpe) * track->nbanks)); + (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", __func__, __LINE__, pitch); return -EINVAL; } - if ((height / 8) & (track->nbanks - 1)) { + if (!IS_ALIGNED((height / 8), track->npipes)) { dev_warn(p->dev, "%s:%d db height (%d) invalid\n", __func__, __LINE__, height); return -EINVAL; @@ -1138,7 +1152,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i break; case V_038000_ARRAY_2D_TILED_THIN1: pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / bpe) * track->nbanks)); + (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", __func__, __LINE__, pitch); diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 858a1920c0d7..966a793e225b 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -474,6 +474,7 @@ #define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58 #define VTX_REUSE_DEPTH_MASK 0x000000FF #define VGT_EVENT_INITIATOR 0x28a90 +# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0) # define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0) #define VM_CONTEXT0_CNTL 0x1410 @@ -775,7 +776,27 @@ #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_COND_WRITE 0x45 #define PACKET3_EVENT_WRITE 0x46 +#define EVENT_TYPE(x) ((x) << 0) +#define EVENT_INDEX(x) ((x) << 8) + /* 0 - any non-TS event + * 1 - ZPASS_DONE + * 2 - SAMPLE_PIPELINESTAT + * 3 - SAMPLE_STREAMOUTSTAT* + * 4 - *S_PARTIAL_FLUSH + * 5 - TS events + */ #define PACKET3_EVENT_WRITE_EOP 0x47 +#define DATA_SEL(x) ((x) << 29) + /* 0 - discard + * 1 - send low 32bit data + * 2 - send 64bit data + * 3 - send 64bit counter value + */ +#define INT_SEL(x) ((x) << 24) + /* 0 - none + * 1 - 
interrupt only (DATA_SEL = 0) + * 2 - interrupt when data write is confirmed + */ #define PACKET3_ONE_REG_WRITE 0x57 #define PACKET3_SET_CONFIG_REG 0x68 #define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9ff38c99a6ea..73f600d39ad4 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -88,7 +88,6 @@ extern int radeon_benchmarking; extern int radeon_testing; extern int radeon_connector_table; extern int radeon_tv; -extern int radeon_new_pll; extern int radeon_audio; extern int radeon_disp_priority; extern int radeon_hw_i2c; @@ -366,6 +365,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev); */ struct radeon_scratch { unsigned num_reg; + uint32_t reg_base; bool free[32]; uint32_t reg[32]; }; @@ -594,8 +594,15 @@ struct radeon_wb { struct radeon_bo *wb_obj; volatile uint32_t *wb; uint64_t gpu_addr; + bool enabled; + bool use_event; }; +#define RADEON_WB_SCRATCH_OFFSET 0 +#define RADEON_WB_CP_RPTR_OFFSET 1024 +#define R600_WB_IH_WPTR_OFFSET 2048 +#define R600_WB_EVENT_OFFSET 3072 + /** * struct radeon_pm - power management datas * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) @@ -1124,6 +1131,12 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) void r600_kms_blit_copy(struct radeon_device *rdev, u64 src_gpu_addr, u64 dst_gpu_addr, int size_bytes); +/* evergreen blit */ +int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); +void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); +void evergreen_kms_blit_copy(struct radeon_device *rdev, + u64 src_gpu_addr, u64 dst_gpu_addr, + int size_bytes); static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) { @@ -1341,6 +1354,9 @@ extern void radeon_update_bandwidth_info(struct radeon_device *rdev); extern void radeon_update_display_priority(struct radeon_device *rdev); extern bool radeon_boot_test_post_card(struct radeon_device *rdev); extern void radeon_scratch_init(struct radeon_device *rdev); +extern void radeon_wb_fini(struct radeon_device *rdev); +extern int radeon_wb_init(struct radeon_device *rdev); +extern void radeon_wb_disable(struct radeon_device *rdev); extern void radeon_surface_init(struct radeon_device *rdev); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); @@ -1425,9 +1441,6 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); extern int r600_ib_test(struct radeon_device *rdev); extern int r600_ring_test(struct radeon_device *rdev); -extern void r600_wb_fini(struct radeon_device *rdev); -extern int r600_wb_enable(struct radeon_device *rdev); -extern void r600_wb_disable(struct radeon_device *rdev); extern void r600_scratch_init(struct radeon_device *rdev); extern int r600_blit_init(struct radeon_device *rdev); extern void r600_blit_fini(struct radeon_device *rdev); @@ -1465,6 +1478,8 @@ extern void r700_cp_stop(struct radeon_device *rdev); extern void r700_cp_fini(struct radeon_device *rdev); extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); extern int evergreen_irq_set(struct radeon_device *rdev); +extern int evergreen_blit_init(struct radeon_device *rdev); +extern void evergreen_blit_fini(struct radeon_device *rdev); /* radeon_acpi.c */ #if defined(CONFIG_ACPI) diff --git 
a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 25e1dd197791..64fb89ecbf74 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -726,9 +726,9 @@ static struct radeon_asic evergreen_asic = { .get_vblank_counter = &evergreen_get_vblank_counter, .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &evergreen_cs_parse, - .copy_blit = NULL, - .copy_dma = NULL, - .copy = NULL, + .copy_blit = &evergreen_copy_blit, + .copy_dma = &evergreen_copy_blit, + .copy = &evergreen_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index a5aff755f0d2..740988244143 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -108,9 +108,6 @@ void r100_irq_disable(struct radeon_device *rdev); void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); void r100_vram_init_sizes(struct radeon_device *rdev); -void r100_wb_disable(struct radeon_device *rdev); -void r100_wb_fini(struct radeon_device *rdev); -int r100_wb_init(struct radeon_device *rdev); int r100_cp_reset(struct radeon_device *rdev); void r100_vga_render_disable(struct radeon_device *rdev); void r100_restore_sanity(struct radeon_device *rdev); @@ -257,11 +254,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int r600_cs_parse(struct radeon_cs_parser *p); void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -int r600_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_pages, - struct radeon_fence *fence); int r600_irq_process(struct radeon_device *rdev); int r600_irq_set(struct radeon_device *rdev); bool r600_gpu_is_lockup(struct radeon_device *rdev); @@ -307,6 +299,9 @@ int evergreen_resume(struct radeon_device *rdev); bool evergreen_gpu_is_lockup(struct radeon_device *rdev); int evergreen_asic_reset(struct radeon_device *rdev); void evergreen_bandwidth_update(struct radeon_device *rdev); +int evergreen_copy_blit(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_pages, struct radeon_fence *fence); void evergreen_hpd_init(struct radeon_device *rdev); void evergreen_hpd_fini(struct radeon_device *rdev); bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 8e43ddae70cc..04cac7ec9039 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1112,8 +1112,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per * family. 
*/ - if (!radeon_new_pll) - p1pll->pll_out_min = 64800; + p1pll->pll_out_min = 64800; } p1pll->pll_in_min = @@ -1277,36 +1276,27 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, return false; } -static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct - radeon_encoder - *encoder, - int id) +bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id) { - struct drm_device *dev = encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info); - uint16_t data_offset; + uint16_t data_offset, size; struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; uint8_t frev, crev; - struct radeon_atom_ss *ss = NULL; - int i; + int i, num_indices; - if (id > ATOM_MAX_SS_ENTRY) - return NULL; - - if (atom_parse_data_header(mode_info->atom_context, index, NULL, + memset(ss, 0, sizeof(struct radeon_atom_ss)); + if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { ss_info = (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset); - ss = - kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT); - if (!ss) - return NULL; - - for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) { + for (i = 0; i < num_indices; i++) { if (ss_info->asSS_Info[i].ucSS_Id == id) { ss->percentage = le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage); @@ -1315,11 +1305,88 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct ss->delay = ss_info->asSS_Info[i].ucSS_Delay; ss->range = ss_info->asSS_Info[i].ucSS_Range; ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div; - break; + return true; } } } - return ss; + return false; +} + +union asic_ss_info { + struct _ATOM_ASIC_INTERNAL_SS_INFO info; + struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; + struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3; +}; + +bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id, u32 clock) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); + uint16_t data_offset, size; + union asic_ss_info *ss_info; + uint8_t frev, crev; + int i, num_indices; + + memset(ss, 0, sizeof(struct radeon_atom_ss)); + if (atom_parse_data_header(mode_info->atom_context, index, &size, + &frev, &crev, &data_offset)) { + + ss_info = + (union asic_ss_info *)(mode_info->atom_context->bios + data_offset); + + switch (frev) { + case 1: + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_ASIC_SS_ASSIGNMENT); + + for (i = 0; i < num_indices; i++) { + if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && + (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { + ss->percentage = + le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); + ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz); + return true; + } + } + break; + case 2: + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); + for (i = 0; i < num_indices; i++) { + if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && + (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { + ss->percentage = + 
le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); + ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz); + return true; + } + } + break; + case 3: + num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); + for (i = 0; i < num_indices; i++) { + if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && + (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { + ss->percentage = + le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); + ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; + ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); + return true; + } + } + break; + default: + DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev); + break; + } + + } + return false; } union lvds_info { @@ -1371,7 +1438,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); lvds->panel_pwr_delay = le16_to_cpu(lvds_info->info.usOffDelayInMs); - lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; + lvds->lcd_misc = lvds_info->info.ucLVDS_Misc; misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) @@ -1388,19 +1455,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); - lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); - - if (ASIC_IS_AVIVO(rdev)) { - if (radeon_new_pll == 0) - lvds->pll_algo = PLL_ALGO_LEGACY; - else - lvds->pll_algo = PLL_ALGO_NEW; - } else { - if (radeon_new_pll == 1) - lvds->pll_algo = PLL_ALGO_NEW; - else - lvds->pll_algo = PLL_ALGO_LEGACY; - } + lvds->lcd_ss_id = lvds_info->info.ucSS_Id; encoder->native_mode = lvds->native_mode; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index ecc1a8fafbfd..4dac4b0a02ee 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -326,6 +326,34 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr } } + if (property == rdev->mode_info.underscan_hborder_property) { + /* need to find digital encoder on connector */ + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_encoder->underscan_hborder != val) { + radeon_encoder->underscan_hborder = val; + radeon_property_change_mode(&radeon_encoder->base); + } + } + + if (property == rdev->mode_info.underscan_vborder_property) { + /* need to find digital encoder on connector */ + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_encoder->underscan_vborder != val) { + radeon_encoder->underscan_vborder = val; + radeon_property_change_mode(&radeon_encoder->base); + } + } + if (property == rdev->mode_info.tv_std_property) { encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); if (!encoder) { @@ -635,6 +663,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) ret = connector_status_connected; } } else { + + /* if we aren't forcing don't do destructive polling */ + if (!force) + return connector->status; + if 
(radeon_connector->dac_load_detect && encoder) { encoder_funcs = encoder->helper_private; ret = encoder_funcs->detect(encoder, connector); @@ -822,6 +855,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) goto out; + if (!force) { + ret = connector->status; + goto out; + } + /* find analog encoder */ if (radeon_connector->dac_load_detect) { for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { @@ -1153,10 +1191,17 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_AUTO); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_hborder_property, + 0); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_vborder_property, + 0); + } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, @@ -1181,10 +1226,17 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_AUTO); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_hborder_property, + 0); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_vborder_property, + 0); + } subpixel_order = SubPixelHorizontalRGB; break; case DRM_MODE_CONNECTOR_DisplayPort: @@ -1212,10 +1264,17 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, UNDERSCAN_AUTO); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_hborder_property, + 0); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_vborder_property, + 0); + } break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 3eef567b0421..017ac54920fb 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -118,22 +118,25 @@ static void radeon_show_cursor(struct drm_crtc *crtc) } static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, - uint32_t gpu_addr) + uint64_t gpu_addr) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; if (ASIC_IS_DCE4(rdev)) { - WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0); - WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(gpu_addr)); + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + gpu_addr & 0xffffffff); } else if (ASIC_IS_AVIVO(rdev)) { if (rdev->family >= CHIP_RV770) { if (radeon_crtc->crtc_id) - WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); + 
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); else - WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0); + WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); } - WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); + WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + gpu_addr & 0xffffffff); } else { radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; /* offset is from DISP(2)_BASE_ADDRESS */ diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 256d204a6d24..8adfedfe547f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -117,9 +117,10 @@ void radeon_scratch_init(struct radeon_device *rdev) } else { rdev->scratch.num_reg = 7; } + rdev->scratch.reg_base = RADEON_SCRATCH_REG0; for (i = 0; i < rdev->scratch.num_reg; i++) { rdev->scratch.free[i] = true; - rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4); + rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); } } @@ -149,6 +150,86 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) } } +void radeon_wb_disable(struct radeon_device *rdev) +{ + int r; + + if (rdev->wb.wb_obj) { + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) + return; + radeon_bo_kunmap(rdev->wb.wb_obj); + radeon_bo_unpin(rdev->wb.wb_obj); + radeon_bo_unreserve(rdev->wb.wb_obj); + } + rdev->wb.enabled = false; +} + +void radeon_wb_fini(struct radeon_device *rdev) +{ + radeon_wb_disable(rdev); + if (rdev->wb.wb_obj) { + radeon_bo_unref(&rdev->wb.wb_obj); + rdev->wb.wb = NULL; + rdev->wb.wb_obj = NULL; + } +} + +int radeon_wb_init(struct radeon_device *rdev) +{ + int r; + + if (rdev->wb.wb_obj == NULL) { + r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, + RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); + if (r) { + dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); + return r; + } + } + r = radeon_bo_reserve(rdev->wb.wb_obj, false); + if (unlikely(r != 0)) { + radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, + &rdev->wb.gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->wb.wb_obj); + dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; + } + r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); + radeon_bo_unreserve(rdev->wb.wb_obj); + if (r) { + dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); + radeon_wb_fini(rdev); + return r; + } + + /* disable event_write fences */ + rdev->wb.use_event = false; + /* disabled via module param */ + if (radeon_no_wb == 1) + rdev->wb.enabled = false; + else { + /* often unreliable on AGP */ + if (rdev->flags & RADEON_IS_AGP) { + rdev->wb.enabled = false; + } else { + rdev->wb.enabled = true; + /* event_write fences are only available on r600+ */ + if (rdev->family >= CHIP_R600) + rdev->wb.use_event = true; + } + } + + dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? 
"en" : "dis"); + + return 0; +} + /** * radeon_vram_location - try to find VRAM location * @rdev: radeon device structure holding all necessary informations diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b92d2f2fcbed..0383631da69c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -454,13 +454,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) return n; } -static void radeon_compute_pll_legacy(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) +void radeon_compute_pll(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p) { uint32_t min_ref_div = pll->min_ref_div; uint32_t max_ref_div = pll->max_ref_div; @@ -513,7 +513,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, max_fractional_feed_div = pll->max_frac_feedback_div; } - for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { + for (post_div = max_post_div; post_div >= min_post_div; --post_div) { uint32_t ref_div; if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) @@ -631,214 +631,6 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, *post_div_p = best_post_div; } -static bool -calc_fb_div(struct radeon_pll *pll, - uint32_t freq, - uint32_t post_div, - uint32_t ref_div, - uint32_t *fb_div, - uint32_t *fb_div_frac) -{ - fixed20_12 feedback_divider, a, b; - u32 vco_freq; - - vco_freq = freq * post_div; - /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ - a.full = dfixed_const(pll->reference_freq); - feedback_divider.full = dfixed_const(vco_freq); - feedback_divider.full = dfixed_div(feedback_divider, a); - a.full = dfixed_const(ref_div); - feedback_divider.full = dfixed_mul(feedback_divider, a); - - if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { - /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ - a.full = dfixed_const(10); - feedback_divider.full = dfixed_mul(feedback_divider, a); - feedback_divider.full += dfixed_const_half(0); - feedback_divider.full = dfixed_floor(feedback_divider); - feedback_divider.full = dfixed_div(feedback_divider, a); - - /* *fb_div = floor(feedback_divider); */ - a.full = dfixed_floor(feedback_divider); - *fb_div = dfixed_trunc(a); - /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ - a.full = dfixed_const(10); - b.full = dfixed_mul(feedback_divider, a); - - feedback_divider.full = dfixed_floor(feedback_divider); - feedback_divider.full = dfixed_mul(feedback_divider, a); - feedback_divider.full = b.full - feedback_divider.full; - *fb_div_frac = dfixed_trunc(feedback_divider); - } else { - /* *fb_div = floor(feedback_divider + 0.5); */ - feedback_divider.full += dfixed_const_half(0); - feedback_divider.full = dfixed_floor(feedback_divider); - - *fb_div = dfixed_trunc(feedback_divider); - *fb_div_frac = 0; - } - - if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div)) - return false; - else - return true; -} - -static bool -calc_fb_ref_div(struct radeon_pll *pll, - uint32_t freq, - uint32_t post_div, - uint32_t *fb_div, - uint32_t *fb_div_frac, - uint32_t *ref_div) -{ - fixed20_12 ffreq, max_error, error, pll_out, a; - u32 vco; - u32 pll_out_min, pll_out_max; - - if (pll->flags & RADEON_PLL_IS_LCD) { - pll_out_min = pll->lcd_pll_out_min; - pll_out_max = 
pll->lcd_pll_out_max; - } else { - pll_out_min = pll->pll_out_min; - pll_out_max = pll->pll_out_max; - } - - ffreq.full = dfixed_const(freq); - /* max_error = ffreq * 0.0025; */ - a.full = dfixed_const(400); - max_error.full = dfixed_div(ffreq, a); - - for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { - if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { - vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); - vco = vco / ((*ref_div) * 10); - - if ((vco < pll_out_min) || (vco > pll_out_max)) - continue; - - /* pll_out = vco / post_div; */ - a.full = dfixed_const(post_div); - pll_out.full = dfixed_const(vco); - pll_out.full = dfixed_div(pll_out, a); - - if (pll_out.full >= ffreq.full) { - error.full = pll_out.full - ffreq.full; - if (error.full <= max_error.full) - return true; - } - } - } - return false; -} - -static void radeon_compute_pll_new(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) -{ - u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; - u32 best_freq = 0, vco_frequency; - u32 pll_out_min, pll_out_max; - - if (pll->flags & RADEON_PLL_IS_LCD) { - pll_out_min = pll->lcd_pll_out_min; - pll_out_max = pll->lcd_pll_out_max; - } else { - pll_out_min = pll->pll_out_min; - pll_out_max = pll->pll_out_max; - } - - /* freq = freq / 10; */ - do_div(freq, 10); - - if (pll->flags & RADEON_PLL_USE_POST_DIV) { - post_div = pll->post_div; - if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div)) - goto done; - - vco_frequency = freq * post_div; - if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) - goto done; - - if (pll->flags & RADEON_PLL_USE_REF_DIV) { - ref_div = pll->reference_div; - if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) - goto done; - if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) - goto done; - } - } else { - for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) { - if (pll->flags & RADEON_PLL_LEGACY) { - if ((post_div == 5) || - (post_div == 7) || - (post_div == 9) || - (post_div == 10) || - (post_div == 11)) - continue; - } - - if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) - continue; - - vco_frequency = freq * post_div; - if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max)) - continue; - if (pll->flags & RADEON_PLL_USE_REF_DIV) { - ref_div = pll->reference_div; - if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) - goto done; - if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) - break; - } else { - if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div)) - break; - } - } - } - - best_freq = pll->reference_freq * 10 * fb_div; - best_freq += pll->reference_freq * fb_div_frac; - best_freq = best_freq / (ref_div * post_div); - -done: - if (best_freq == 0) - DRM_ERROR("Couldn't find valid PLL dividers\n"); - - *dot_clock_p = best_freq / 10; - *fb_div_p = fb_div; - *frac_fb_div_p = fb_div_frac; - *ref_div_p = ref_div; - *post_div_p = post_div; - - DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); -} - -void radeon_compute_pll(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) -{ - switch (pll->algo) { - case PLL_ALGO_NEW: - radeon_compute_pll_new(pll, 
freq, dot_clock_p, fb_div_p, - frac_fb_div_p, ref_div_p, post_div_p); - break; - case PLL_ALGO_LEGACY: - default: - radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p, - frac_fb_div_p, ref_div_p, post_div_p); - break; - } -} - static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); @@ -1002,6 +794,24 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) radeon_underscan_enum_list[i].name); } + rdev->mode_info.underscan_hborder_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_RANGE, + "underscan hborder", 2); + if (!rdev->mode_info.underscan_hborder_property) + return -ENOMEM; + rdev->mode_info.underscan_hborder_property->values[0] = 0; + rdev->mode_info.underscan_hborder_property->values[1] = 128; + + rdev->mode_info.underscan_vborder_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_RANGE, + "underscan vborder", 2); + if (!rdev->mode_info.underscan_vborder_property) + return -ENOMEM; + rdev->mode_info.underscan_vborder_property->values[0] = 0; + rdev->mode_info.underscan_vborder_property->values[1] = 128; + return 0; } @@ -1159,8 +969,14 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && drm_detect_hdmi_monitor(radeon_connector->edid) && is_hdtv_mode(mode)))) { - radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; - radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; + if (radeon_encoder->underscan_hborder != 0) + radeon_crtc->h_border = radeon_encoder->underscan_hborder; + else + radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; + if (radeon_encoder->underscan_vborder != 0) + radeon_crtc->v_border = radeon_encoder->underscan_vborder; + else + radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; radeon_crtc->rmx_type = RMX_FULL; src_v = crtc->mode.vdisplay; dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); @@ -1195,3 +1011,156 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, } return true; } + +/* + * Retrieve current video scanout position of crtc on a given gpu. + * + * \param rdev Device to query. + * \param crtc Crtc to query. + * \param *vpos Location where vertical scanout position should be stored. + * \param *hpos Location where horizontal scanout position should be stored. + * + * Returns vpos as a positive number while in active scanout area. + * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." + * + * \return Flags, or'ed together as follows: + * + * RADEON_SCANOUTPOS_VALID = Query successful. + * RADEON_SCANOUTPOS_INVBL = Inside vblank. + * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of + * this flag means that returned position may be offset by a constant but + * unknown small number of scanlines wrt. real scanout position.
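+ *
+ * Example (sketch only, assuming a valid rdev pointer and crtc index):
+ *
+ *	int vpos, hpos, flags;
+ *
+ *	flags = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
+ *	if ((flags & RADEON_SCANOUTPOS_VALID) &&
+ *	    (flags & RADEON_SCANOUTPOS_INVBL))
+ *		; /* crtc is currently inside its vblank interval */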
+ * + */ +int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos) +{ + u32 stat_crtc = 0, vbl = 0, position = 0; + int vbl_start, vbl_end, vtotal, ret = 0; + bool in_vbl = true; + + if (ASIC_IS_DCE4(rdev)) { + if (crtc == 0) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC0_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC0_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 1) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC1_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC1_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 2) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC2_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC2_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 3) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC3_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC3_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 4) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC4_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC4_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 5) { + vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + + EVERGREEN_CRTC5_REGISTER_OFFSET); + position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + + EVERGREEN_CRTC5_REGISTER_OFFSET); + ret |= RADEON_SCANOUTPOS_VALID; + } + } else if (ASIC_IS_AVIVO(rdev)) { + if (crtc == 0) { + vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); + position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 1) { + vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); + position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); + ret |= RADEON_SCANOUTPOS_VALID; + } + } else { + /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ + if (crtc == 0) { + /* Assume vbl_end == 0, get vbl_start from + * upper 16 bits. + */ + vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) & + RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; + /* Only retrieve vpos from upper 16 bits, set hpos == 0. */ + position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; + stat_crtc = RREG32(RADEON_CRTC_STATUS); + if (!(stat_crtc & 1)) + in_vbl = false; + + ret |= RADEON_SCANOUTPOS_VALID; + } + if (crtc == 1) { + vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & + RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; + position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; + stat_crtc = RREG32(RADEON_CRTC2_STATUS); + if (!(stat_crtc & 1)) + in_vbl = false; + + ret |= RADEON_SCANOUTPOS_VALID; + } + } + + /* Decode into vertical and horizontal scanout position. */ + *vpos = position & 0x1fff; + *hpos = (position >> 16) & 0x1fff; + + /* Valid vblank area boundaries from gpu retrieved? */ + if (vbl > 0) { + /* Yes: Decode. */ + ret |= RADEON_SCANOUTPOS_ACCURATE; + vbl_start = vbl & 0x1fff; + vbl_end = (vbl >> 16) & 0x1fff; + } + else { + /* No: Fake something reasonable which gives at least ok results. */ + vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay; + vbl_end = 0; + } + + /* Test scanout position against vblank region. 
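+	 * vpos in [vbl_end, vbl_start) lies in the active display area;
+	 * only that case clears in_vbl below.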
*/ + if ((*vpos < vbl_start) && (*vpos >= vbl_end)) + in_vbl = false; + + /* Check if inside vblank area and apply corrective offsets: + * vpos will then be >=0 in video scanout area, but negative + * within vblank area, counting down the number of lines until + * start of scanout. + */ + + /* Inside "upper part" of vblank area? Apply corrective offset if so: */ + if (in_vbl && (*vpos >= vbl_start)) { + vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal; + *vpos = *vpos - vtotal; + } + + /* Correct for shifted end of vbl at vbl_end. */ + *vpos = *vpos - vbl_end; + + /* In vblank? */ + if (in_vbl) + ret |= RADEON_SCANOUTPOS_INVBL; + + return ret; +} diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 29c1237c2e7b..88e4ea925900 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -47,9 +47,10 @@ * - 2.4.0 - add crtc id query * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) + * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 6 +#define KMS_DRIVER_MINOR 7 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -93,7 +94,6 @@ int radeon_benchmarking = 0; int radeon_testing = 0; int radeon_connector_table = 0; int radeon_tv = 1; -int radeon_new_pll = -1; int radeon_audio = 1; int radeon_disp_priority = 0; int radeon_hw_i2c = 0; @@ -131,9 +131,6 @@ module_param_named(connector_table, radeon_connector_table, int, 0444); MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); module_param_named(tv, radeon_tv, int, 0444); -MODULE_PARM_DESC(new_pll, "Select new PLL code"); -module_param_named(new_pll, radeon_new_pll, int, 0444); - MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); module_param_named(audio, radeon_audio, int, 0444); @@ -203,8 +200,6 @@ static struct drm_driver driver_old = { .irq_uninstall = radeon_driver_irq_uninstall, .irq_handler = radeon_driver_irq_handler, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = radeon_ioctls, .dma_ioctl = radeon_cp_buffers, .fops = { @@ -291,8 +286,6 @@ static struct drm_driver kms_driver = { .irq_uninstall = radeon_driver_irq_uninstall_kms, .irq_handler = radeon_driver_irq_handler_kms, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = radeon_ioctls_kms, .gem_init_object = radeon_gem_object_init, .gem_free_object = radeon_gem_object_free, diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 2c293e8304d6..ae58b6849a2e 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -529,9 +529,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) + if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v1.ucMisc |= (1 << 1); } else { if (dig->linkb) @@ 
-558,18 +558,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) args.v2.ucTemporal = 0; args.v2.ucFRC = 0; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) + if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) { + if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; - if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; } - if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) { + if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; - if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) + if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; - if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) + if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; } } else { diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 40b0c087b592..efa211898fe6 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -59,6 +59,8 @@ static struct fb_ops radeonfb_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index b1f9a81b5d1d..216392d0353b 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -72,7 +72,15 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) bool wake = false; unsigned long cjiffies; - seq = RREG32(rdev->fence_drv.scratch_reg); + if (rdev->wb.enabled) { + u32 scratch_index; + if (rdev->wb.use_event) + scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + else + scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; + seq = rdev->wb.wb[scratch_index/4]; + } else + seq = RREG32(rdev->fence_drv.scratch_reg); if (seq != rdev->fence_drv.last_seq) { rdev->fence_drv.last_seq = seq; rdev->fence_drv.last_jiffies = jiffies; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 305049afde15..ace2e6384d40 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -347,11 +347,26 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) +{ + return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return radeon_crtc_do_set_base(crtc, fb, x, y, 1); +} + +int radeon_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_framebuffer *radeon_fb; + struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct radeon_bo *rbo; uint64_t base; @@ -364,14 
+379,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, DRM_DEBUG_KMS("\n"); /* no fb bound */ - if (!crtc->fb) { + if (!atomic && !crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } - radeon_fb = to_radeon_framebuffer(crtc->fb); + if (atomic) { + radeon_fb = to_radeon_framebuffer(fb); + target_fb = fb; + } + else { + radeon_fb = to_radeon_framebuffer(crtc->fb); + target_fb = crtc->fb; + } - switch (crtc->fb->bits_per_pixel) { + switch (target_fb->bits_per_pixel) { case 8: format = 2; break; @@ -415,10 +437,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, crtc_offset_cntl = 0; - pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); - crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) + - ((crtc->fb->bits_per_pixel * 8) - 1)) / - (crtc->fb->bits_per_pixel * 8)); + pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8); + crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) + + ((target_fb->bits_per_pixel * 8) - 1)) / + (target_fb->bits_per_pixel * 8)); crtc_pitch |= crtc_pitch << 16; @@ -443,14 +465,14 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, crtc_tile_x0_y0 = x | (y << 16); base &= ~0x7ff; } else { - int byteshift = crtc->fb->bits_per_pixel >> 4; + int byteshift = target_fb->bits_per_pixel >> 4; int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); crtc_offset_cntl |= (y % 16); } } else { int offset = y * pitch_pixels + x; - switch (crtc->fb->bits_per_pixel) { + switch (target_fb->bits_per_pixel) { case 8: offset *= 1; break; @@ -496,8 +518,8 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset); WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch); - if (old_fb && old_fb != crtc->fb) { - radeon_fb = to_radeon_framebuffer(old_fb); + if (!atomic && fb && fb != crtc->fb) { + radeon_fb = to_radeon_framebuffer(fb); rbo = radeon_fb->obj->driver_private; r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) @@ -717,10 +739,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) pll = &rdev->clock.p1pll; pll->flags = RADEON_PLL_LEGACY; - if (radeon_new_pll == 1) - pll->algo = PLL_ALGO_NEW; - else - pll->algo = PLL_ALGO_LEGACY; if (mode->clock > 200000) /* range limits??? 
*/ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; @@ -1040,6 +1058,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = { .mode_fixup = radeon_crtc_mode_fixup, .mode_set = radeon_crtc_mode_set, .mode_set_base = radeon_crtc_set_base, + .mode_set_base_atomic = radeon_crtc_set_base_atomic, .prepare = radeon_crtc_prepare, .commit = radeon_crtc_commit, .load_lut = radeon_crtc_load_lut, diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 454c1dc7ea45..92457163d070 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -149,12 +150,6 @@ struct radeon_tmds_pll { #define RADEON_PLL_USE_POST_DIV (1 << 12) #define RADEON_PLL_IS_LCD (1 << 13) -/* pll algo */ -enum radeon_pll_algo { - PLL_ALGO_LEGACY, - PLL_ALGO_NEW -}; - struct radeon_pll { /* reference frequency */ uint32_t reference_freq; @@ -187,8 +182,6 @@ struct radeon_pll { /* pll id */ uint32_t id; - /* pll algo */ - enum radeon_pll_algo algo; }; struct radeon_i2c_chan { @@ -240,6 +233,8 @@ struct radeon_mode_info { struct drm_property *tmds_pll_property; /* underscan */ struct drm_property *underscan_property; + struct drm_property *underscan_hborder_property; + struct drm_property *underscan_vborder_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; @@ -335,22 +330,24 @@ struct radeon_encoder_ext_tmds { struct radeon_atom_ss { uint16_t percentage; uint8_t type; - uint8_t step; + uint16_t step; uint8_t delay; uint8_t range; uint8_t refdiv; + /* asic_ss */ + uint16_t rate; + uint16_t amount; }; struct radeon_encoder_atom_dig { bool linkb; /* atom dig */ bool coherent_mode; - int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ - /* atom lvds */ - uint32_t lvds_misc; + int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. 
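+					 * (newer asics have more than
+					 * two DIG blocks, hence "etc.")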
*/ + /* atom lvds/edp */ + uint32_t lcd_misc; uint16_t panel_pwr_delay; - enum radeon_pll_algo pll_algo; - struct radeon_atom_ss *ss; + uint32_t lcd_ss_id; /* panel mode */ struct drm_display_mode native_mode; }; @@ -369,6 +366,8 @@ struct radeon_encoder { uint32_t pixel_clock; enum radeon_rmx_type rmx_type; enum radeon_underscan_type underscan_type; + uint32_t underscan_hborder; + uint32_t underscan_vborder; struct drm_display_mode native_mode; void *enc_priv; int audio_polling_active; @@ -435,6 +434,11 @@ struct radeon_framebuffer { struct drm_gem_object *obj; }; +/* radeon_get_crtc_scanoutpos() return flags */ +#define RADEON_SCANOUTPOS_VALID (1 << 0) +#define RADEON_SCANOUTPOS_INVBL (1 << 1) +#define RADEON_SCANOUTPOS_ACCURATE (1 << 2) + extern enum radeon_tv_std radeon_combios_get_tv_info(struct radeon_device *rdev); extern enum radeon_tv_std @@ -490,6 +494,13 @@ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); +extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id); +extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id, u32 clock); + extern void radeon_compute_pll(struct radeon_pll *pll, uint64_t freq, uint32_t *dot_clock_p, @@ -513,6 +524,10 @@ extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); extern void radeon_crtc_load_lut(struct drm_crtc *crtc); extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); +extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, + enum mode_set_atomic state); extern int atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -522,7 +537,13 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode); extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); - +extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, + enum mode_set_atomic state); +extern int radeon_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic); extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, @@ -531,6 +552,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); +extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos); + extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); extern struct edid * radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index b3b5306bb578..d7ab91416410 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -435,7 +435,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) out: radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, - bo->tbo.mem.mm_node->start << PAGE_SHIFT, + bo->tbo.mem.start << PAGE_SHIFT, bo->tbo.num_pages << PAGE_SHIFT); return 0; } @@ -532,7 +532,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) rdev = rbo->rdev; if (bo->mem.mem_type == TTM_PL_VRAM) { size = bo->mem.num_pages << PAGE_SHIFT; - offset = 
bo->mem.mm_node->start << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; if ((offset + size) > rdev->mc.visible_vram_size) { /* hurrah the memory is not visible ! */ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); @@ -540,7 +540,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) r = ttm_bo_validate(bo, &rbo->placement, false, true, false); if (unlikely(r != 0)) return r; - offset = bo->mem.mm_node->start << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; /* this should not happen */ if ((offset + size) > rdev->mc.visible_vram_size) return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f87efec76236..8c9b2ef32c68 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -712,73 +712,21 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) static bool radeon_pm_in_vbl(struct radeon_device *rdev) { - u32 stat_crtc = 0, vbl = 0, position = 0; + int crtc, vpos, hpos, vbl_status; bool in_vbl = true; - if (ASIC_IS_DCE4(rdev)) { - if (rdev->pm.active_crtcs & (1 << 0)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 1)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 2)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 3)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 4)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 5)) { - vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + - EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; - position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + - EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff; - } - } else if (ASIC_IS_AVIVO(rdev)) { - if (rdev->pm.active_crtcs & (1 << 0)) { - vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff; - position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff; - } - if (rdev->pm.active_crtcs & (1 << 1)) { - vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff; - position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff; - } - if (position < vbl && position > 1) - in_vbl = false; - } else { - if (rdev->pm.active_crtcs & (1 << 0)) { - stat_crtc = RREG32(RADEON_CRTC_STATUS); - if (!(stat_crtc & 1)) - in_vbl = false; - } - if (rdev->pm.active_crtcs & (1 << 1)) { - stat_crtc = RREG32(RADEON_CRTC2_STATUS); - if (!(stat_crtc & 1)) + /* Iterate over all active crtc's. All crtc's must be in vblank, + * otherwise return in_vbl == false. 
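+	 *
+	 * A crtc only clears in_vbl when its scanout position query
+	 * returns RADEON_SCANOUTPOS_VALID without RADEON_SCANOUTPOS_INVBL,
+	 * i.e., when the position is known to lie outside the vblank.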
+ */ + for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { + if (rdev->pm.active_crtcs & (1 << crtc)) { + vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos); + if ((vbl_status & RADEON_SCANOUTPOS_VALID) && + !(vbl_status & RADEON_SCANOUTPOS_INVBL)) in_vbl = false; } } - if (position < vbl && position > 1) - in_vbl = false; - return in_vbl; } diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 261e98a276db..6ea798ce8218 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -247,10 +247,14 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) */ void radeon_ring_free_size(struct radeon_device *rdev) { - if (rdev->family >= CHIP_R600) - rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); - else - rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); + if (rdev->wb.enabled) + rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]; + else { + if (rdev->family >= CHIP_R600) + rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); + else + rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); + } /* This works because ring_size is a power of 2 */ rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); rdev->cp.ring_free_dw -= rdev->cp.wptr; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index a823d8fe54c2..fe95bb35317e 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: + man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.gtt_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; @@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: /* "On-card" video ram */ + man->func = &ttm_bo_manager_func; man->gpu_offset = rdev->mc.vram_start; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; @@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - old_start = old_mem->mm_node->start << PAGE_SHIFT; - new_start = new_mem->mm_node->start << PAGE_SHIFT; + old_start = old_mem->start << PAGE_SHIFT; + new_start = new_mem->start << PAGE_SHIFT; switch (old_mem->mem_type) { case TTM_PL_VRAM: @@ -326,14 +328,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, } r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); out_cleanup: - if (tmp_mem.mm_node) { - struct ttm_bo_global *glob = rdev->mman.bdev.glob; - - spin_lock(&glob->lru_lock); - drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&glob->lru_lock); - return r; - } + ttm_bo_mem_put(bo, &tmp_mem); return r; } @@ -372,14 +367,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, goto out_cleanup; } out_cleanup: - if (tmp_mem.mm_node) { - struct ttm_bo_global *glob = rdev->mman.bdev.glob; - - spin_lock(&glob->lru_lock); - drm_mm_put_block(tmp_mem.mm_node); - spin_unlock(&glob->lru_lock); - return r; - } + ttm_bo_mem_put(bo, &tmp_mem); return r; } @@ -449,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ #if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) { /* RADEON_IS_AGP is set only if AGP is active */ - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = rdev->mc.agp_base; mem->bus.is_iomem = 
!rdev->ddev->agp->cant_use_aperture; } #endif break; case TTM_PL_VRAM: - mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; + mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) return -EINVAL; @@ -699,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend, int r; gtt = container_of(backend, struct radeon_ttm_backend, backend); - gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT; + gtt->offset = bo_mem->start << PAGE_SHIFT; if (!gtt->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); } @@ -798,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev) radeon_mem_types_list[i].show = &radeon_mm_dump_table; radeon_mem_types_list[i].driver_features = 0; if (i == 0) - radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager; + radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv; else - radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager; + radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv; } /* Add ttm page pool to debugfs */ diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index f78fd592544d..ac40fd39d787 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -22,6 +22,10 @@ evergreen 0x9400 0x00008B10 PA_SC_LINE_STIPPLE_STATE 0x00008BF0 PA_SC_ENHANCE 0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ +0x00008D90 SQ_DYN_GPR_OPTIMIZATION +0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN +0x00008D98 SQ_DYN_GPR_THREAD_LIMIT +0x00008D9C SQ_DYN_GPR_LDS_LIMIT 0x00008C00 SQ_CONFIG 0x00008C04 SQ_GPR_RESOURCE_MGMT_1 0x00008C08 SQ_GPR_RESOURCE_MGMT_2 @@ -34,6 +38,10 @@ evergreen 0x9400 0x00008C24 SQ_STACK_RESOURCE_MGMT_2 0x00008C28 SQ_STACK_RESOURCE_MGMT_3 0x00008DF8 SQ_CONST_MEM_BASE +0x00008E20 SQ_STATIC_THREAD_MGMT_1 +0x00008E24 SQ_STATIC_THREAD_MGMT_2 +0x00008E28 SQ_STATIC_THREAD_MGMT_3 +0x00008E2C SQ_LDS_RESOURCE_MGMT 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index ae2b76b9a388..f683e51a2a06 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -397,6 +397,12 @@ static int rs400_startup(struct radeon_device *rdev) r = rs400_gart_enable(rdev); if (r) return r; + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r100_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -406,9 +412,6 @@ static int rs400_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -443,7 +446,7 @@ int rs400_resume(struct radeon_device *rdev) int rs400_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); r100_irq_disable(rdev); rs400_gart_disable(rdev); return 0; @@ -452,7 +455,7 @@ int rs400_suspend(struct radeon_device *rdev) void rs400_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rs400_gart_fini(rdev); @@ -526,7 +529,7 @@ int rs400_init(struct radeon_device *rdev) /* Somethings 
want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 51d5f7b5ab21..b091a1f6fa4e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -796,6 +796,12 @@ static int rs600_startup(struct radeon_device *rdev) r = rs600_gart_enable(rdev); if (r) return r; + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -805,9 +811,6 @@ static int rs600_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -848,7 +851,7 @@ int rs600_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); rs600_irq_disable(rdev); rs600_gart_disable(rdev); return 0; @@ -858,7 +861,7 @@ void rs600_fini(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rs600_gart_fini(rdev); @@ -932,7 +935,7 @@ int rs600_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); rs600_gart_fini(rdev); radeon_irq_kms_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 4dc2a87ea680..0137d3e3728d 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -616,6 +616,12 @@ static int rs690_startup(struct radeon_device *rdev) r = rs400_gart_enable(rdev); if (r) return r; + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -625,9 +631,6 @@ static int rs690_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -668,7 +671,7 @@ int rs690_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); rs600_irq_disable(rdev); rs400_gart_disable(rdev); return 0; @@ -678,7 +681,7 @@ void rs690_fini(struct radeon_device *rdev) { r600_audio_fini(rdev); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rs400_gart_fini(rdev); @@ -753,7 +756,7 @@ int rs690_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); rs400_gart_fini(rdev); radeon_irq_kms_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 4d6e86041a9f..5d569f41f4ae 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ 
b/drivers/gpu/drm/radeon/rv515.c @@ -386,6 +386,12 @@ static int rv515_startup(struct radeon_device *rdev) if (r) return r; } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ rs600_irq_set(rdev); rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); @@ -395,9 +401,6 @@ static int rv515_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing CP (%d).\n", r); return r; } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); r = r100_ib_init(rdev); if (r) { dev_err(rdev->dev, "failled initializing IB (%d).\n", r); @@ -431,7 +434,7 @@ int rv515_resume(struct radeon_device *rdev) int rv515_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); - r100_wb_disable(rdev); + radeon_wb_disable(rdev); rs600_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); @@ -447,7 +450,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev) void rv515_fini(struct radeon_device *rdev) { r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_gem_fini(rdev); rv370_pcie_gart_fini(rdev); @@ -527,7 +530,7 @@ int rv515_init(struct radeon_device *rdev) /* Somethings want wront with the accel init stop accel */ dev_err(rdev->dev, "Disabling GPU acceleration\n"); r100_cp_fini(rdev); - r100_wb_fini(rdev); + radeon_wb_fini(rdev); r100_ib_fini(rdev); radeon_irq_kms_fini(rdev); rv370_pcie_gart_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 9490da700749..245374e2b778 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -269,6 +269,7 @@ void r700_cp_stop(struct radeon_device *rdev) { rdev->mc.active_vram_size = rdev->mc.visible_vram_size; WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); + WREG32(SCRATCH_UMSK, 0); } static int rv770_cp_load_microcode(struct radeon_device *rdev) @@ -643,10 +644,11 @@ static void rv770_gpu_init(struct radeon_device *rdev) else gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); - - gb_tiling_config |= GROUP_SIZE(0); - rdev->config.rv770.tiling_group_size = 256; - + gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); + if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) + rdev->config.rv770.tiling_group_size = 512; + else + rdev->config.rv770.tiling_group_size = 256; if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { gb_tiling_config |= ROW_TILING(3); gb_tiling_config |= SAMPLE_SPLIT(3); @@ -1030,19 +1032,12 @@ static int rv770_startup(struct radeon_device *rdev) rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } - /* pin copy shader into vram */ - if (rdev->r600_blit.shader_obj) { - r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); - if (unlikely(r != 0)) - return r; - r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - radeon_bo_unreserve(rdev->r600_blit.shader_obj); - if (r) { - DRM_ERROR("failed to pin blit object %d\n", r); - return r; - } - } + + /* allocate wb buffer */ + r = radeon_wb_init(rdev); + if (r) + return r; + /* Enable IRQ */ r = r600_irq_init(rdev); if (r) { @@ -1061,8 +1056,7 @@ static int rv770_startup(struct radeon_device *rdev) r = r600_cp_resume(rdev); if (r) return r; - /* write back buffer are not vital so don't worry about failure 
*/ - r600_wb_enable(rdev); + return 0; } @@ -1108,7 +1102,7 @@ int rv770_suspend(struct radeon_device *rdev) r700_cp_stop(rdev); rdev->cp.ready = false; r600_irq_suspend(rdev); - r600_wb_disable(rdev); + radeon_wb_disable(rdev); rv770_pcie_gart_disable(rdev); /* unpin shaders bo */ if (rdev->r600_blit.shader_obj) { @@ -1203,8 +1197,8 @@ int rv770_init(struct radeon_device *rdev) if (r) { dev_err(rdev->dev, "disabling GPU acceleration\n"); r700_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); rdev->accel_working = false; @@ -1236,8 +1230,8 @@ void rv770_fini(struct radeon_device *rdev) { r600_blit_fini(rdev); r700_cp_fini(rdev); - r600_wb_fini(rdev); r600_irq_fini(rdev); + radeon_wb_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); rv770_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 2a2830f5a840..fa64d25d4248 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -42,8 +42,6 @@ static struct drm_driver driver = { .lastclose = savage_driver_lastclose, .unload = savage_driver_unload, .reclaim_buffers = savage_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = savage_ioctls, .dma_ioctl = savage_bci_buffers, .fops = { diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 4bb10ef6676a..4caf5d01cfd3 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -67,13 +67,10 @@ static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, .load = sis_driver_load, .unload = sis_driver_unload, - .context_dtor = NULL, .dma_quiescent = sis_idle, .reclaim_buffers = NULL, .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked, .lastclose = sis_lastclose, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = sis_ioctls, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index 640567ef713d..b70fa91d761a 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -42,8 +42,6 @@ static struct pci_device_id pciidlist[] = { static struct drm_driver driver = { .driver_features = DRIVER_USE_MTRR, .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .fops = { .owner = THIS_MODULE, .open = drm_open, diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index b256d4adfafe..f3cf6f02c997 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,6 +4,7 @@ ccflags-y := -Iinclude/drm ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o + ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ + ttm_bo_manager.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 4bf69c404491..f999e36f30b4 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) { struct ttm_agp_backend *agp_be = container_of(backend, struct ttm_agp_backend, backend); + struct drm_mm_node *node = bo_mem->mm_node; struct agp_memory *mem = agp_be->mem; int cached 
= (bo_mem->placement & TTM_PL_FLAG_CACHED); int ret; @@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) mem->is_flushed = 1; mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; - ret = agp_bind_memory(mem, bo_mem->mm_node->start); + ret = agp_bind_memory(mem, node->start); if (ret) printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n"); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index db809e034cc4..a1cb783c7131 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) man->available_caching); printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", man->default_caching); - if (mem_type != TTM_PL_SYSTEM) { - spin_lock(&bdev->glob->lru_lock); - drm_mm_debug_table(&man->manager, TTM_PFX); - spin_unlock(&bdev->glob->lru_lock); - } + if (mem_type != TTM_PL_SYSTEM) + (*man->func->debug)(man, TTM_PFX); } static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, @@ -169,18 +166,13 @@ static void ttm_bo_release_list(struct kref *list_kref) int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) { - if (interruptible) { - int ret = 0; - - ret = wait_event_interruptible(bo->event_queue, + return wait_event_interruptible(bo->event_queue, atomic_read(&bo->reserved) == 0); - if (unlikely(ret != 0)) - return ret; } else { wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); + return 0; } - return 0; } EXPORT_SYMBOL(ttm_bo_wait_unreserved); @@ -421,7 +413,7 @@ moved: if (bo->mem.mm_node) { spin_lock(&bo->lock); - bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + + bo->offset = (bo->mem.start << PAGE_SHIFT) + bdev->man[bo->mem.mem_type].gpu_offset; bo->cur_placement = bo->mem.placement; spin_unlock(&bo->lock); @@ -442,135 +434,144 @@ out_err: } /** - * Call bo::reserved and with the lru lock held. + * Call bo::reserved. * Will release GPU memory type usage on destruction. - * This is the place to put in driver specific hooks. - * Will release the bo::reserved lock and the - * lru lock on exit. + * This is the place to put in driver specific hooks to release + * driver private resources. + * Will release the bo::reserved lock. */ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) { - struct ttm_bo_global *glob = bo->glob; - if (bo->ttm) { - - /** - * Release the lru_lock, since we don't want to have - * an atomic requirement on ttm_tt[unbind|destroy]. - */ - - spin_unlock(&glob->lru_lock); ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL; - spin_lock(&glob->lru_lock); } - if (bo->mem.mm_node) { - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - } + ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); wake_up_all(&bo->event_queue); - spin_unlock(&glob->lru_lock); } - -/** - * If bo idle, remove from delayed- and lru lists, and unref. - * If not idle, and already on delayed list, do nothing. - * If not idle, and not on delayed list, put on delayed list, - * up the list_kref and schedule a delayed list check. 
- */ - -static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) +static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; - struct ttm_bo_driver *driver = bdev->driver; + struct ttm_bo_driver *driver; + void *sync_obj; + void *sync_obj_arg; + int put_count; int ret; spin_lock(&bo->lock); -retry: - (void) ttm_bo_wait(bo, false, false, !remove_all); - + (void) ttm_bo_wait(bo, false, false, true); if (!bo->sync_obj) { - int put_count; - - spin_unlock(&bo->lock); spin_lock(&glob->lru_lock); - ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0); /** - * Someone else has the object reserved. Bail and retry. + * Lock inversion between bo::reserve and bo::lock here, + * but that's OK, since we're only trylocking. */ - if (unlikely(ret == -EBUSY)) { - spin_unlock(&glob->lru_lock); - spin_lock(&bo->lock); - goto requeue; - } + ret = ttm_bo_reserve_locked(bo, false, true, false, 0); - /** - * We can re-check for sync object without taking - * the bo::lock since setting the sync object requires - * also bo::reserved. A busy object at this point may - * be caused by another thread starting an accelerated - * eviction. - */ - - if (unlikely(bo->sync_obj)) { - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); - spin_unlock(&glob->lru_lock); - spin_lock(&bo->lock); - if (remove_all) - goto retry; - else - goto requeue; - } + if (unlikely(ret == -EBUSY)) + goto queue; + spin_unlock(&bo->lock); put_count = ttm_bo_del_from_lru(bo); - if (!list_empty(&bo->ddestroy)) { - list_del_init(&bo->ddestroy); - ++put_count; - } - + spin_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); while (put_count--) kref_put(&bo->list_kref, ttm_bo_ref_bug); - return 0; - } -requeue: - spin_lock(&glob->lru_lock); - if (list_empty(&bo->ddestroy)) { - void *sync_obj = bo->sync_obj; - void *sync_obj_arg = bo->sync_obj_arg; - - kref_get(&bo->list_kref); - list_add_tail(&bo->ddestroy, &bdev->ddestroy); - spin_unlock(&glob->lru_lock); - spin_unlock(&bo->lock); - - if (sync_obj) - driver->sync_obj_flush(sync_obj, sync_obj_arg); - schedule_delayed_work(&bdev->wq, - ((HZ / 100) < 1) ? 1 : HZ / 100); - ret = 0; - + return; } else { + spin_lock(&glob->lru_lock); + } +queue: + sync_obj = bo->sync_obj; + sync_obj_arg = bo->sync_obj_arg; + driver = bdev->driver; + + kref_get(&bo->list_kref); + list_add_tail(&bo->ddestroy, &bdev->ddestroy); + spin_unlock(&glob->lru_lock); + spin_unlock(&bo->lock); + + if (sync_obj) + driver->sync_obj_flush(sync_obj, sync_obj_arg); + schedule_delayed_work(&bdev->wq, + ((HZ / 100) < 1) ? 1 : HZ / 100); +} + +/** + * function ttm_bo_cleanup_refs + * If bo idle, remove from delayed- and lru lists, and unref. + * If not idle, do nothing. + * + * @interruptible Any sleeps should occur interruptibly. + * @no_wait_reserve Never wait for reserve. Return -EBUSY instead. + * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. 
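+ *
+ * Returns 0 on success (including the case where another thread has
+ * already done the cleanup), -EBUSY when a no_wait flag is set and the
+ * bo is busy, or -ERESTARTSYS when an interruptible wait is interrupted.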
+ */ + +static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, + bool interruptible, + bool no_wait_reserve, + bool no_wait_gpu) +{ + struct ttm_bo_global *glob = bo->glob; + int put_count; + int ret = 0; + +retry: + spin_lock(&bo->lock); + ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); + spin_unlock(&bo->lock); + + if (unlikely(ret != 0)) + return ret; + + spin_lock(&glob->lru_lock); + ret = ttm_bo_reserve_locked(bo, interruptible, + no_wait_reserve, false, 0); + + if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { spin_unlock(&glob->lru_lock); - spin_unlock(&bo->lock); - ret = -EBUSY; + return ret; } - return ret; + /** + * We can re-check for sync object without taking + * the bo::lock since setting the sync object requires + * also bo::reserved. A busy object at this point may + * be caused by another thread recently starting an accelerated + * eviction. + */ + + if (unlikely(bo->sync_obj)) { + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); + spin_unlock(&glob->lru_lock); + goto retry; + } + + put_count = ttm_bo_del_from_lru(bo); + list_del_init(&bo->ddestroy); + ++put_count; + + spin_unlock(&glob->lru_lock); + ttm_bo_cleanup_memtype_use(bo); + + while (put_count--) + kref_put(&bo->list_kref, ttm_bo_ref_bug); + + return 0; } /** @@ -602,7 +603,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) } spin_unlock(&glob->lru_lock); - ret = ttm_bo_cleanup_refs(entry, remove_all); + ret = ttm_bo_cleanup_refs(entry, false, !remove_all, + !remove_all); kref_put(&entry->list_kref, ttm_bo_release_list); entry = nentry; @@ -645,7 +647,7 @@ static void ttm_bo_release(struct kref *kref) bo->vm_node = NULL; } write_unlock(&bdev->vm_lock); - ttm_bo_cleanup_refs(bo, false); + ttm_bo_cleanup_refs_or_queue(bo); kref_put(&bo->list_kref, ttm_bo_release_list); write_lock(&bdev->vm_lock); } @@ -680,7 +682,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_global *glob = bo->glob; struct ttm_mem_reg evict_mem; struct ttm_placement placement; int ret = 0; @@ -726,12 +727,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, if (ret) { if (ret != -ERESTARTSYS) printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); - spin_lock(&glob->lru_lock); - if (evict_mem.mm_node) { - drm_mm_put_block(evict_mem.mm_node); - evict_mem.mm_node = NULL; - } - spin_unlock(&glob->lru_lock); + ttm_bo_mem_put(bo, &evict_mem); goto out; } bo->evicted = true; @@ -759,6 +755,18 @@ retry: bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); kref_get(&bo->list_kref); + if (!list_empty(&bo->ddestroy)) { + spin_unlock(&glob->lru_lock); + ret = ttm_bo_cleanup_refs(bo, interruptible, + no_wait_reserve, no_wait_gpu); + kref_put(&bo->list_kref, ttm_bo_release_list); + + if (likely(ret == 0 || ret == -ERESTARTSYS)) + return ret; + + goto retry; + } + ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); if (unlikely(ret == -EBUSY)) { @@ -792,41 +800,14 @@ retry: return ret; } -static int ttm_bo_man_get_node(struct ttm_buffer_object *bo, - struct ttm_mem_type_manager *man, - struct ttm_placement *placement, - struct ttm_mem_reg *mem, - struct drm_mm_node **node) +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { - struct ttm_bo_global *glob = bo->glob; - unsigned long lpfn; - int ret; + struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; - lpfn = placement->lpfn; - if 
(!lpfn) - lpfn = man->size; - *node = NULL; - do { - ret = drm_mm_pre_get(&man->manager); - if (unlikely(ret)) - return ret; - - spin_lock(&glob->lru_lock); - *node = drm_mm_search_free_in_range(&man->manager, - mem->num_pages, mem->page_alignment, - placement->fpfn, lpfn, 1); - if (unlikely(*node == NULL)) { - spin_unlock(&glob->lru_lock); - return 0; - } - *node = drm_mm_get_block_atomic_range(*node, mem->num_pages, - mem->page_alignment, - placement->fpfn, - lpfn); - spin_unlock(&glob->lru_lock); - } while (*node == NULL); - return 0; + if (mem->mm_node) + (*man->func->put_node)(man, mem); } +EXPORT_SYMBOL(ttm_bo_mem_put); /** * Repeatedly evict memory from the LRU for @mem_type until we create enough @@ -843,14 +824,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - struct drm_mm_node *node; int ret; do { - ret = ttm_bo_man_get_node(bo, man, placement, mem, &node); + ret = (*man->func->get_node)(man, bo, placement, mem); if (unlikely(ret != 0)) return ret; - if (node) + if (mem->mm_node) break; spin_lock(&glob->lru_lock); if (list_empty(&man->lru)) { @@ -863,9 +843,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, if (unlikely(ret != 0)) return ret; } while (1); - if (node == NULL) + if (mem->mm_node == NULL) return -ENOMEM; - mem->mm_node = node; mem->mem_type = mem_type; return 0; } @@ -939,7 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, bool type_found = false; bool type_ok = false; bool has_erestartsys = false; - struct drm_mm_node *node = NULL; int i, ret; mem->mm_node = NULL; @@ -973,17 +951,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (man->has_type && man->use_type) { type_found = true; - ret = ttm_bo_man_get_node(bo, man, placement, mem, - &node); + ret = (*man->func->get_node)(man, bo, placement, mem); if (unlikely(ret)) return ret; } - if (node) + if (mem->mm_node) break; } - if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) { - mem->mm_node = node; + if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { mem->mem_type = mem_type; mem->placement = cur_flags; return 0; @@ -1053,7 +1029,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) { - struct ttm_bo_global *glob = bo->glob; int ret = 0; struct ttm_mem_reg mem; @@ -1081,11 +1056,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, goto out_unlock; ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); out_unlock: - if (ret && mem.mm_node) { - spin_lock(&glob->lru_lock); - drm_mm_put_block(mem.mm_node); - spin_unlock(&glob->lru_lock); - } + if (ret && mem.mm_node) + ttm_bo_mem_put(bo, &mem); return ret; } @@ -1093,11 +1065,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem) { int i; - struct drm_mm_node *node = mem->mm_node; - if (node && placement->lpfn != 0 && - (node->start < placement->fpfn || - node->start + node->size > placement->lpfn)) + if (mem->mm_node && placement->lpfn != 0 && + (mem->start < placement->fpfn || + mem->start + mem->num_pages > placement->lpfn)) return -1; for (i = 0; i < placement->num_placement; i++) { @@ -1341,7 +1312,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man; int 
ret = -EINVAL; @@ -1364,13 +1334,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) if (mem_type > 0) { ttm_bo_force_list_clean(bdev, mem_type, false); - spin_lock(&glob->lru_lock); - if (drm_mm_clean(&man->manager)) - drm_mm_takedown(&man->manager); - else - ret = -EBUSY; - - spin_unlock(&glob->lru_lock); + ret = (*man->func->takedown)(man); } return ret; @@ -1421,6 +1385,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, ret = bdev->driver->init_mem_type(bdev, type, man); if (ret) return ret; + man->bdev = bdev; ret = 0; if (type != TTM_PL_SYSTEM) { @@ -1430,7 +1395,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, type); return ret; } - ret = drm_mm_init(&man->manager, 0, p_size); + + ret = (*man->func->init)(man, p_size); if (ret) return ret; } @@ -1824,6 +1790,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) struct ttm_buffer_object, swap); kref_get(&bo->list_kref); + if (!list_empty(&bo->ddestroy)) { + spin_unlock(&glob->lru_lock); + (void) ttm_bo_cleanup_refs(bo, false, false, false); + kref_put(&bo->list_kref, ttm_bo_release_list); + continue; + } + /** * Reserve buffer. Since we unlock while sleeping, we need * to re-check that nobody removed us from the swap-list while diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c new file mode 100644 index 000000000000..7410c190c891 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -0,0 +1,148 @@ +/************************************************************************** + * + * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include
+#include
+#include
+#include
+#include
+#include
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement,
+			       struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	struct drm_mm_node *node = NULL;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	do {
+		ret = drm_mm_pre_get(mm);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		node = drm_mm_search_free_in_range(mm,
+				mem->num_pages, mem->page_alignment,
+				placement->fpfn, lpfn, 1);
+		if (unlikely(node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+						     mem->page_alignment,
+						     placement->fpfn,
+						     lpfn);
+		spin_unlock(&glob->lru_lock);
+	} while (node == NULL);
+
+	mem->mm_node = node;
+	mem->start = node->start;
+	return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+
+	if (mem->mm_node) {
+		spin_lock(&glob->lru_lock);
+		drm_mm_put_block(mem->mm_node);
+		spin_unlock(&glob->lru_lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	struct drm_mm *mm;
+	int ret;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return -ENOMEM;
+
+	ret = drm_mm_init(mm, 0, p_size);
+	if (ret) {
+		kfree(mm);
+		return ret;
+	}
+
+	man->priv = mm;
+	return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	int ret = 0;
+
+	spin_lock(&glob->lru_lock);
+	if (drm_mm_clean(mm)) {
+		drm_mm_takedown(mm);
+		kfree(mm);
+		man->priv = NULL;
+	} else
+		ret = -EBUSY;
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+
+	spin_lock(&glob->lru_lock);
+	drm_mm_debug_table(mm, prefix);
+	spin_unlock(&glob->lru_lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+	ttm_bo_man_init,
+	ttm_bo_man_takedown,
+	ttm_bo_man_get_node,
+	ttm_bo_man_put_node,
+	ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index e8a73e65da69..3106d5bcce32 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -39,14 +39,7 @@
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	if (old_mem->mm_node) {
-		spin_lock(&bo->glob->lru_lock);
-		drm_mm_put_block(old_mem->mm_node);
-		spin_unlock(&bo->glob->lru_lock);
-	}
-	old_mem->mm_node = NULL;
+	ttm_bo_mem_put(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
@@ -263,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		dir = 1;
 
 	if ((old_mem->mem_type == new_mem->mem_type) &&
-	    (new_mem->mm_node->start <
-	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+	    (new_mem->start < old_mem->start + old_mem->size)) {
 		dir = -1;
 		add = new_mem->num_pages - 1;
 	}
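With the range manager split out, the TTM core no longer touches drm_mm directly: ttm_bo_mem_space() and ttm_bo_mem_force_space() allocate through (*man->func->get_node)() and release through ttm_bo_mem_put(). The contract is that get_node() returns non-zero on a hard failure, and 0 with mem->mm_node left NULL when the memory type is simply full. A minimal sketch, for a hypothetical "foo" driver that is not part of this series, of wiring the stock range manager up from the init_mem_type() hook:

	static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				     struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			/* Back VRAM with the stock drm_mm range manager. */
			man->func = &ttm_bo_manager_func;
			man->flags = TTM_MEMTYPE_FLAG_FIXED |
				     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}

	/* Later, during device init; p_size is a page count for this
	 * manager (vram_size is a hypothetical driver variable): */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);

diff --git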
a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index b8984a5ae521..e1ff4e7a6eb0 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -51,8 +51,6 @@ static struct drm_driver driver = { .reclaim_buffers_locked = NULL, .reclaim_buffers_idlelocked = via_reclaim_buffers_locked, .lastclose = via_lastclose, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = via_ioctls, .fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 4505e17df3f5..c9281a1b1d3b 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ - vmwgfx_overlay.o vmwgfx_fence.o + vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index c4f5114aee7c..80bc37b274e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; +static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | + TTM_PL_FLAG_CACHED; + struct ttm_placement vmw_vram_placement = { .fpfn = 0, .lpfn = 0, @@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = { .busy_placement = &vram_placement_flags }; +static uint32_t vram_gmr_placement_flags[] = { + TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, + VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED +}; + +struct ttm_placement vmw_vram_gmr_placement = { + .fpfn = 0, + .lpfn = 0, + .num_placement = 2, + .placement = vram_gmr_placement_flags, + .num_busy_placement = 1, + .busy_placement = &gmr_placement_flags +}; + struct ttm_placement vmw_vram_sys_placement = { .fpfn = 0, .lpfn = 0, @@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = { struct vmw_ttm_backend { struct ttm_backend backend; + struct page **pages; + unsigned long num_pages; + struct vmw_private *dev_priv; + int gmr_id; }; static int vmw_ttm_populate(struct ttm_backend *backend, unsigned long num_pages, struct page **pages, struct page *dummy_read_page) { + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + vmw_be->pages = pages; + vmw_be->num_pages = num_pages; + return 0; } static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) { - return 0; + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + vmw_be->gmr_id = bo_mem->start; + + return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages, + vmw_be->num_pages, vmw_be->gmr_id); } static int vmw_ttm_unbind(struct ttm_backend *backend) { + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); return 0; } static void vmw_ttm_clear(struct ttm_backend *backend) { + struct vmw_ttm_backend *vmw_be = + container_of(backend, struct vmw_ttm_backend, backend); + + vmw_be->pages = NULL; + vmw_be->num_pages = 0; } static void vmw_ttm_destroy(struct ttm_backend *backend) @@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev) return NULL; vmw_be->backend.func = &vmw_ttm_func; + 
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
 	return &vmw_be->backend;
 }
@@ -142,15 +185,28 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		/* System memory */
 
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
+		man->available_caching = TTM_PL_FLAG_CACHED;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
 		man->gpu_offset = 0;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
-		man->default_caching = TTM_PL_FLAG_WC;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case VMW_PL_GMR:
+		/*
+		 * "Guest Memory Regions" is an aperture-like feature with
+		 * one slot per bo. There is an upper limit on the number
+		 * of slots as well as on the bo size.
+		 */
+		man->func = &vmw_gmrid_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	default:
 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -174,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }
 
-static void vmw_move_notify(struct ttm_buffer_object *bo,
-			    struct ttm_mem_reg *new_mem)
-{
-	if (new_mem->mem_type != TTM_PL_SYSTEM)
-		vmw_dmabuf_gmr_unbind(bo);
-}
-
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
-	vmw_dmabuf_gmr_unbind(bo);
-}
-
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -200,10 +244,10 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
 		return -EINVAL;
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
-		/* System memory */
+	case VMW_PL_GMR:
 		return 0;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = dev_priv->vram_start;
 		mem->bus.is_iomem = true;
 		break;
@@ -276,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
-	.move_notify = vmw_move_notify,
-	.swap_notify = vmw_swap_notify,
+	.move_notify = NULL,
+	.swap_notify = NULL,
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
 	.io_mem_free = &vmw_ttm_io_mem_free,
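Note the asymmetry between the two manager-backed types above: for VRAM, mem->start is a page offset into the aperture, while for VMW_PL_GMR it is a slot id handed out by the id manager, which is why io_mem_reserve() treats GMRs like system memory. A hypothetical helper, not part of this series, showing how command-submission code can recover the id the device should use:

	/* Hypothetical: map a validated buffer to the GMR id the SVGA
	 * device addresses it through. */
	static inline uint32_t vmw_bo_gmr_id(struct ttm_buffer_object *bo)
	{
		if (bo->mem.mem_type == TTM_PL_VRAM)
			return SVGA_GMR_FRAMEBUFFER; /* VRAM alias GMR */
		return bo->mem.mm_node ?
			(uint32_t) bo->mem.start : SVGA_GMR_NULL;
	}

The driver-load path below then brings the new type online; note that ttm_bo_init_mm()'s p_size argument is in manager-defined units, a GMR slot count here rather than pages.

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2ef93df9e8ae..10ca97ee0206 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	idr_init(&dev_priv->context_idr);
 	idr_init(&dev_priv->surface_idr);
 	idr_init(&dev_priv->stream_idr);
-	ida_init(&dev_priv->gmr_ida);
 	mutex_init(&dev_priv->init_mutex);
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	atomic_set(&dev_priv->fence_queue_waiters, 0);
 	atomic_set(&dev_priv->fifo_queue_waiters, 0);
-	INIT_LIST_HEAD(&dev_priv->gmr_lru);
 
 	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
 	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err2;
 	}
 
+	dev_priv->has_gmr = true;
+	if (ttm_bo_init_mm(&dev_priv->bdev,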
VMW_PL_GMR, + dev_priv->max_gmr_ids) != 0) { + DRM_INFO("No GMR memory available. " + "Graphics memory resources are very limited.\n"); + dev_priv->has_gmr = false; + } + dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); @@ -440,13 +446,14 @@ out_err4: out_err3: drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); + if (dev_priv->has_gmr) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); out_err2: (void)ttm_bo_device_release(&dev_priv->bdev); out_err1: vmw_ttm_global_release(dev_priv); out_err0: - ida_destroy(&dev_priv->gmr_ida); idr_destroy(&dev_priv->surface_idr); idr_destroy(&dev_priv->context_idr); idr_destroy(&dev_priv->stream_idr); @@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev) iounmap(dev_priv->mmio_virt); drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); + if (dev_priv->has_gmr) + (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); - ida_destroy(&dev_priv->gmr_ida); idr_destroy(&dev_priv->surface_idr); idr_destroy(&dev_priv->context_idr); idr_destroy(&dev_priv->stream_idr); @@ -597,6 +605,8 @@ static void vmw_lastclose(struct drm_device *dev) static void vmw_master_init(struct vmw_master *vmaster) { ttm_lock_init(&vmaster->lock); + INIT_LIST_HEAD(&vmaster->fb_surf); + mutex_init(&vmaster->fb_surf_mutex); } static int vmw_master_create(struct drm_device *dev, @@ -608,7 +618,7 @@ static int vmw_master_create(struct drm_device *dev, if (unlikely(vmaster == NULL)) return -ENOMEM; - ttm_lock_init(&vmaster->lock); + vmw_master_init(vmaster); ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); master->driver_priv = vmaster; @@ -699,6 +709,7 @@ static void vmw_master_drop(struct drm_device *dev, vmw_fp->locked_master = drm_master_get(file_priv->master); ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); + vmw_kms_idle_workqueues(vmaster); if (unlikely((ret != 0))) { DRM_ERROR("Unable to lock TTM at VT switch.\n"); @@ -751,15 +762,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, * Buffer contents is moved to swappable memory. */ ttm_bo_swapout_all(&dev_priv->bdev); + break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: + case PM_POST_RESTORE: ttm_suspend_unlock(&vmaster->lock); + break; case PM_RESTORE_PREPARE: break; - case PM_POST_RESTORE: - break; default: break; } @@ -770,21 +782,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, * These might not be needed with the virtual SVGA device. 
*/ -int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) { + struct drm_device *dev = pci_get_drvdata(pdev); + struct vmw_private *dev_priv = vmw_priv(dev); + + if (dev_priv->num_3d_resources != 0) { + DRM_INFO("Can't suspend or hibernate " + "while 3D resources are active.\n"); + return -EBUSY; + } + pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } -int vmw_pci_resume(struct pci_dev *pdev) +static int vmw_pci_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return pci_enable_device(pdev); } +static int vmw_pm_suspend(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + struct pm_message dummy; + + dummy.event = 0; + + return vmw_pci_suspend(pdev, dummy); +} + +static int vmw_pm_resume(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + + return vmw_pci_resume(pdev); +} + +static int vmw_pm_prepare(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + struct drm_device *dev = pci_get_drvdata(pdev); + struct vmw_private *dev_priv = vmw_priv(dev); + + /** + * Release 3d reference held by fbdev and potentially + * stop fifo. + */ + dev_priv->suspended = true; + if (dev_priv->enable_fb) + vmw_3d_resource_dec(dev_priv); + + if (dev_priv->num_3d_resources != 0) { + + DRM_INFO("Can't suspend or hibernate " + "while 3D resources are active.\n"); + + if (dev_priv->enable_fb) + vmw_3d_resource_inc(dev_priv); + dev_priv->suspended = false; + return -EBUSY; + } + + return 0; +} + +static void vmw_pm_complete(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + struct drm_device *dev = pci_get_drvdata(pdev); + struct vmw_private *dev_priv = vmw_priv(dev); + + /** + * Reclaim 3d reference held by fbdev and potentially + * start fifo. + */ + if (dev_priv->enable_fb) + vmw_3d_resource_inc(dev_priv); + + dev_priv->suspended = false; +} + +static const struct dev_pm_ops vmw_pm_ops = { + .prepare = vmw_pm_prepare, + .complete = vmw_pm_complete, + .suspend = vmw_pm_suspend, + .resume = vmw_pm_resume, +}; + static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, @@ -798,8 +887,6 @@ static struct drm_driver driver = { .irq_handler = vmw_irq_handler, .get_vblank_counter = vmw_get_vblank_counter, .reclaim_buffers_locked = NULL, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = vmw_ioctls, .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), .dma_quiescent = NULL, /*vmw_dma_quiescent, */ @@ -821,15 +908,16 @@ static struct drm_driver driver = { .compat_ioctl = drm_compat_ioctl, #endif .llseek = noop_llseek, - }, + }, .pci_driver = { - .name = VMWGFX_DRIVER_NAME, - .id_table = vmw_pci_id_list, - .probe = vmw_probe, - .remove = vmw_remove, - .suspend = vmw_pci_suspend, - .resume = vmw_pci_resume - }, + .name = VMWGFX_DRIVER_NAME, + .id_table = vmw_pci_id_list, + .probe = vmw_probe, + .remove = vmw_remove, + .driver = { + .pm = &vmw_pm_ops + } + }, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, .date = VMWGFX_DRIVER_DATE, @@ -863,3 +951,7 @@ module_exit(vmwgfx_exit); MODULE_AUTHOR("VMware Inc. and others"); MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); MODULE_LICENSE("GPL and additional rights"); +MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." + __stringify(VMWGFX_DRIVER_MINOR) "." + __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." 
+ "0"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 58de6393f611..e7a58d055041 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -39,9 +39,9 @@ #include "ttm/ttm_execbuf_util.h" #include "ttm/ttm_module.h" -#define VMWGFX_DRIVER_DATE "20100209" +#define VMWGFX_DRIVER_DATE "20100927" #define VMWGFX_DRIVER_MAJOR 1 -#define VMWGFX_DRIVER_MINOR 2 +#define VMWGFX_DRIVER_MINOR 4 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) @@ -49,6 +49,9 @@ #define VMWGFX_MAX_GMRS 2048 #define VMWGFX_MAX_DISPLAYS 16 +#define VMW_PL_GMR TTM_PL_PRIV0 +#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 + struct vmw_fpriv { struct drm_master *locked_master; struct ttm_object_file *tfile; @@ -57,8 +60,6 @@ struct vmw_fpriv { struct vmw_dma_buffer { struct ttm_buffer_object base; struct list_head validate_list; - struct list_head gmr_lru; - uint32_t gmr_id; bool gmr_bound; uint32_t cur_validate_node; bool on_validate_list; @@ -151,6 +152,8 @@ struct vmw_overlay; struct vmw_master { struct ttm_lock lock; + struct mutex fb_surf_mutex; + struct list_head fb_surf; }; struct vmw_vga_topology_state { @@ -182,6 +185,7 @@ struct vmw_private { uint32_t capabilities; uint32_t max_gmr_descriptors; uint32_t max_gmr_ids; + bool has_gmr; struct mutex hw_mutex; /* @@ -263,14 +267,6 @@ struct vmw_private { uint32_t val_seq; struct mutex cmdbuf_mutex; - /** - * GMR management. Protected by the lru spinlock. - */ - - struct ida gmr_ida; - struct list_head gmr_lru; - - /** * Operating mode. */ @@ -286,6 +282,7 @@ struct vmw_private { struct vmw_master *active_master; struct vmw_master fbdev_master; struct notifier_block pm_nb; + bool suspended; struct mutex release_mutex; uint32_t num_3d_resources; @@ -331,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv); */ extern int vmw_gmr_bind(struct vmw_private *dev_priv, - struct ttm_buffer_object *bo); + struct page *pages[], + unsigned long num_pages, + int gmr_id); extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); /** @@ -380,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, uint32_t id, struct vmw_dma_buffer **out); -extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo); -extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id); -extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id); extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, struct vmw_dma_buffer *bo); extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, struct vmw_dma_buffer *bo); -extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo); extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, @@ -439,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); extern struct ttm_placement vmw_vram_placement; extern struct ttm_placement vmw_vram_ne_placement; extern struct ttm_placement vmw_vram_sys_placement; +extern struct ttm_placement vmw_vram_gmr_placement; extern struct ttm_placement vmw_sys_placement; extern struct ttm_bo_driver vmw_bo_driver; extern int vmw_dma_quiescent(struct drm_device *dev); @@ -518,6 +514,10 @@ void vmw_kms_write_svga(struct vmw_private 
*vmw_priv, unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+				uint32_t pitch,
+				uint32_t height);
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
@@ -536,6 +536,12 @@ int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
 int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 
+/**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
 /**
  * Inline helper functions
  */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 8e396850513c..51d9f9f1d7f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 		reloc = &sw_context->relocs[i];
 		validate = &sw_context->val_bufs[reloc->index];
 		bo = validate->bo;
-		reloc->location->offset += bo->offset;
-		reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+		if (bo->mem.mem_type == TTM_PL_VRAM) {
+			reloc->location->offset += bo->offset;
+			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+		} else
+			reloc->location->gmrId = bo->mem.start;
 	}
 	vmw_free_relocations(sw_context);
 }
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
 	int ret;
 
-	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-		return 0;
-
 	/**
-	 * Put BO in VRAM, only if there is space.
+	 * Put BO in VRAM if there is space, otherwise as a GMR.
+	 * If there is no space in VRAM and GMR ids are all used up,
+	 * start evicting GMRs to make room. If the DMA buffer can't be
+	 * used as a GMR, this will return -ENOMEM.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
-	if (unlikely(ret == -ERESTARTSYS))
-		return ret;
-
-	/**
-	 * Otherwise, set it up as GMR.
-	 */
-
-	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
-		return 0;
-
-	ret = vmw_gmr_bind(dev_priv, bo);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
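The single validate call above replaces the old two-step VRAM-then-GMR dance: TTM itself walks the placement alternatives and evicts as needed. A condensed view of the placement it uses, restated from the vmwgfx_buffer.c hunk earlier in this series with explanatory comments added:

	static uint32_t vram_gmr_placement_flags[] = {
		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,	/* first choice: VRAM */
		VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED	/* otherwise: a GMR slot */
	};

	struct ttm_placement vmw_vram_gmr_placement = {
		.num_placement = 2,
		.placement = vram_gmr_placement_flags,
		.num_busy_placement = 1,
		/* Under memory pressure, go straight for a GMR slot
		 * rather than evicting VRAM. */
		.busy_placement = &gmr_placement_flags
	};

@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.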
*/ + DRM_INFO("Falling through to VRAM.\n"); ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 409e172f4abf..41d9a5b73c03 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var, return -EINVAL; } + if (!vmw_kms_validate_mode_vram(vmw_priv, + info->fix.line_length, + var->yoffset + var->yres)) { + DRM_ERROR("Requested geom can not fit in framebuffer\n"); + return -EINVAL; + } + return 0; } @@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par) SVGAFifoCmdUpdate body; } *cmd; + if (vmw_priv->suspended) + return; + spin_lock_irqsave(&par->dirty.lock, flags); if (!par->dirty.active) { spin_unlock_irqrestore(&par->dirty.lock, flags); @@ -616,7 +626,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, goto err_unlock; if (bo->mem.mem_type == TTM_PL_VRAM && - bo->mem.mm_node->start < bo->num_pages) + bo->mem.start < bo->num_pages && + bo->mem.start > 0) (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 5f8908a5d7fd..de0c5948521d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, */ static unsigned long vmw_gmr_count_descriptors(struct page *pages[], - unsigned long num_pages) + unsigned long num_pages) { unsigned long prev_pfn = ~(0UL); unsigned long pfn; @@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[], } int vmw_gmr_bind(struct vmw_private *dev_priv, - struct ttm_buffer_object *bo) + struct page *pages[], + unsigned long num_pages, + int gmr_id) { - struct ttm_tt *ttm = bo->ttm; - unsigned long descriptors; - int ret; - uint32_t id; struct list_head desc_pages; + int ret; - if (!(dev_priv->capabilities & SVGA_CAP_GMR)) + if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) return -EINVAL; - ret = ttm_tt_populate(ttm); - if (unlikely(ret != 0)) - return ret; - - descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages); - if (unlikely(descriptors > dev_priv->max_gmr_descriptors)) + if (vmw_gmr_count_descriptors(pages, num_pages) > + dev_priv->max_gmr_descriptors) return -EINVAL; INIT_LIST_HEAD(&desc_pages); - ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages, - ttm->num_pages); + + ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages); if (unlikely(ret != 0)) return ret; - ret = vmw_gmr_id_alloc(dev_priv, &id); - if (unlikely(ret != 0)) - goto out_no_id; - - vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages); + vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages); vmw_gmr_free_descriptors(&desc_pages); - vmw_dmabuf_set_gmr(bo, id); + return 0; - -out_no_id: - vmw_gmr_free_descriptors(&desc_pages); - return ret; } + void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) { mutex_lock(&dev_priv->hw_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c new file mode 100644 index 000000000000..ac6e0d1bd629 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -0,0 +1,137 @@ +/************************************************************************** + * + * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom
+ */
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include
+#include
+#include
+
+struct vmwgfx_gmrid_man {
+	spinlock_t lock;
+	struct ida gmr_ida;
+	uint32_t max_gmr_ids;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+				  struct ttm_buffer_object *bo,
+				  struct ttm_placement *placement,
+				  struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+	int ret;
+	int id;
+
+	mem->mm_node = NULL;
+
+	do {
+		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
+			return -ENOMEM;
+
+		spin_lock(&gman->lock);
+		ret = ida_get_new(&gman->gmr_ida, &id);
+
+		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
+			ida_remove(&gman->gmr_ida, id);
+			spin_unlock(&gman->lock);
+			return 0;
+		}
+
+		spin_unlock(&gman->lock);
+
+	} while (ret == -EAGAIN);
+
+	if (likely(ret == 0)) {
+		mem->mm_node = gman;
+		mem->start = id;
+	}
+
+	return ret;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+				   struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (mem->mm_node) {
+		spin_lock(&gman->lock);
+		ida_remove(&gman->gmr_ida, mem->start);
+		spin_unlock(&gman->lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+			      unsigned long p_size)
+{
+	struct vmwgfx_gmrid_man *gman =
+		kzalloc(sizeof(*gman), GFP_KERNEL);
+
+	if (unlikely(gman == NULL))
+		return -ENOMEM;
+
+	spin_lock_init(&gman->lock);
+	ida_init(&gman->gmr_ida);
+	gman->max_gmr_ids = p_size;
+	man->priv = (void *) gman;
+	return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (gman) {
+		ida_destroy(&gman->gmr_ida);
+		kfree(gman);
+	}
+	return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+				const char *prefix)
+{
+	printk(KERN_INFO "%s: No debug info available for the GMR "
+	       "id manager.\n", prefix);
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+	vmw_gmrid_man_init,
+	vmw_gmrid_man_takedown,
+	vmw_gmrid_man_get_node,
+	vmw_gmrid_man_put_node,
+	vmw_gmrid_man_debug
+};
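The id manager stores the manager pointer itself in mem->mm_node, purely so the core's "is a node allocated" checks keep working; the real allocation result is the slot id in mem->start. A hedged usage sketch, where vmw_sys_gmr_placement and vmw_dmabuf_to_gmr are made up for illustration and not part of this series, of validating a buffer into a GMR slot and reading back its id:

	static uint32_t gmr_flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;

	static struct ttm_placement vmw_sys_gmr_placement = {
		.num_placement = 1,
		.placement = &gmr_flags,
		.num_busy_placement = 1,
		.busy_placement = &gmr_flags,
	};

	static int vmw_dmabuf_to_gmr(struct ttm_buffer_object *bo)
	{
		int ret;

		/* interruptible, with reserve and gpu waits allowed */
		ret = ttm_bo_validate(bo, &vmw_sys_gmr_placement,
				      true, false, false);
		if (ret != 0)
			return ret;
		return (int) bo->mem.start;	/* the GMR slot id */
	}

diff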
--git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 1c7a316454d8..570d57775a58 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, case DRM_VMW_PARAM_FIFO_CAPS: param->value = dev_priv->fifo.capabilities; break; + case DRM_VMW_PARAM_MAX_FB_SIZE: + param->value = dev_priv->vram_size; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index e882ba099f0c..87c6e6156d7d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -332,18 +332,55 @@ struct vmw_framebuffer_surface { struct delayed_work d_work; struct mutex work_lock; bool present_fs; + struct list_head head; + struct drm_master *master; }; +/** + * vmw_kms_idle_workqueues - Flush workqueues on this master + * + * @vmaster - Pointer identifying the master, for the surfaces of which + * we idle the dirty work queues. + * + * This function should be called with the ttm lock held in exclusive mode + * to idle all dirty work queues before the fifo is taken down. + * + * The work task may actually requeue itself, but after the flush returns we're + * sure that there's nothing to present, since the ttm lock is held in + * exclusive mode, so the fifo will never get used. + */ + +void vmw_kms_idle_workqueues(struct vmw_master *vmaster) +{ + struct vmw_framebuffer_surface *entry; + + mutex_lock(&vmaster->fb_surf_mutex); + list_for_each_entry(entry, &vmaster->fb_surf, head) { + if (cancel_delayed_work_sync(&entry->d_work)) + (void) entry->d_work.work.func(&entry->d_work.work); + + (void) cancel_delayed_work_sync(&entry->d_work); + } + mutex_unlock(&vmaster->fb_surf_mutex); +} + void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) { - struct vmw_framebuffer_surface *vfb = + struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); + struct vmw_master *vmaster = vmw_master(vfbs->master); - cancel_delayed_work_sync(&vfb->d_work); + + mutex_lock(&vmaster->fb_surf_mutex); + list_del(&vfbs->head); + mutex_unlock(&vmaster->fb_surf_mutex); + + cancel_delayed_work_sync(&vfbs->d_work); + drm_master_put(&vfbs->master); drm_framebuffer_cleanup(framebuffer); - vmw_surface_unreference(&vfb->surface); + vmw_surface_unreference(&vfbs->surface); - kfree(framebuffer); + kfree(vfbs); } static void vmw_framebuffer_present_fs_callback(struct work_struct *work) @@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work) SVGA3dCopyRect cr; } *cmd; + /** + * Strictly we should take the ttm_lock in read mode before accessing + * the fifo, to make sure the fifo is present and up. However, + * instead we flush all workqueues under the ttm lock in exclusive mode + * before taking down the fifo. 
+ */ mutex_lock(&vfbs->work_lock); if (!vfbs->present_fs) goto out_unlock; @@ -392,17 +435,20 @@ out_unlock: int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips) { struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); + struct vmw_master *vmaster = vmw_master(file_priv->master); struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); struct vmw_surface *surf = vfbs->surface; struct drm_clip_rect norect; SVGA3dCopyRect *cr; int i, inc = 1; + int ret; struct { SVGA3dCmdHeader header; @@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, SVGA3dCopyRect cr; } *cmd; + if (unlikely(vfbs->master != file_priv->master)) + return -EINVAL; + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + if (!num_clips || !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT)) { @@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, */ vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); } + ttm_read_unlock(&vmaster->lock); return 0; } @@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); if (unlikely(cmd == NULL)) { DRM_ERROR("Fifo reserve failed.\n"); + ttm_read_unlock(&vmaster->lock); return -ENOMEM; } @@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, } vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); - + ttm_read_unlock(&vmaster->lock); return 0; } @@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { .create_handle = vmw_framebuffer_create_handle, }; -int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, - struct vmw_surface *surface, - struct vmw_framebuffer **out, - unsigned width, unsigned height) +static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, + struct drm_file *file_priv, + struct vmw_surface *surface, + struct vmw_framebuffer **out, + const struct drm_mode_fb_cmd + *mode_cmd) { struct drm_device *dev = dev_priv->dev; struct vmw_framebuffer_surface *vfbs; + enum SVGA3dSurfaceFormat format; + struct vmw_master *vmaster = vmw_master(file_priv->master); int ret; + /* + * Sanity checks. 
+ */ + + if (unlikely(surface->mip_levels[0] != 1 || + surface->num_sizes != 1 || + surface->sizes[0].width < mode_cmd->width || + surface->sizes[0].height < mode_cmd->height || + surface->sizes[0].depth != 1)) { + DRM_ERROR("Incompatible surface dimensions " + "for requested mode.\n"); + return -EINVAL; + } + + switch (mode_cmd->depth) { + case 32: + format = SVGA3D_A8R8G8B8; + break; + case 24: + format = SVGA3D_X8R8G8B8; + break; + case 16: + format = SVGA3D_R5G6B5; + break; + case 15: + format = SVGA3D_A1R5G5B5; + break; + default: + DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); + return -EINVAL; + } + + if (unlikely(format != surface->format)) { + DRM_ERROR("Invalid surface format for requested mode.\n"); + return -EINVAL; + } + vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); if (!vfbs) { ret = -ENOMEM; @@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, } /* XXX get the first 3 from the surface info */ - vfbs->base.base.bits_per_pixel = 32; - vfbs->base.base.pitch = width * 32 / 4; - vfbs->base.base.depth = 24; - vfbs->base.base.width = width; - vfbs->base.base.height = height; + vfbs->base.base.bits_per_pixel = mode_cmd->bpp; + vfbs->base.base.pitch = mode_cmd->pitch; + vfbs->base.base.depth = mode_cmd->depth; + vfbs->base.base.width = mode_cmd->width; + vfbs->base.base.height = mode_cmd->height; vfbs->base.pin = &vmw_surface_dmabuf_pin; vfbs->base.unpin = &vmw_surface_dmabuf_unpin; vfbs->surface = surface; + vfbs->master = drm_master_get(file_priv->master); mutex_init(&vfbs->work_lock); + + mutex_lock(&vmaster->fb_surf_mutex); INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); + list_add_tail(&vfbs->head, &vmaster->fb_surf); + mutex_unlock(&vmaster->fb_surf_mutex); + *out = &vfbs->base; return 0; @@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) } int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips) { struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); + struct vmw_master *vmaster = vmw_master(file_priv->master); struct drm_clip_rect norect; + int ret; struct { uint32_t header; SVGAFifoCmdUpdate body; } *cmd; int i, increment = 1; + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + if (!num_clips) { num_clips = 1; clips = &norect; @@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); if (unlikely(cmd == NULL)) { DRM_ERROR("Fifo reserve failed.\n"); + ttm_read_unlock(&vmaster->lock); return -ENOMEM; } @@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, } vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); + ttm_read_unlock(&vmaster->lock); return 0; } @@ -659,16 +770,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); } -int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, - struct vmw_dma_buffer *dmabuf, - struct vmw_framebuffer **out, - unsigned width, unsigned height) +static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, + struct vmw_dma_buffer *dmabuf, + struct vmw_framebuffer **out, + const struct drm_mode_fb_cmd + *mode_cmd) { struct drm_device *dev = dev_priv->dev; struct vmw_framebuffer_dmabuf *vfbd; + unsigned int requested_size; int ret; + requested_size = 
mode_cmd->height * mode_cmd->pitch; + if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) { + DRM_ERROR("Screen buffer object size is too small " + "for requested mode.\n"); + return -EINVAL; + } + vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); if (!vfbd) { ret = -ENOMEM; @@ -685,12 +805,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, goto out_err3; } - /* XXX get the first 3 from the surface info */ - vfbd->base.base.bits_per_pixel = 32; - vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8; - vfbd->base.base.depth = 24; - vfbd->base.base.width = width; - vfbd->base.base.height = height; + vfbd->base.base.bits_per_pixel = mode_cmd->bpp; + vfbd->base.base.pitch = mode_cmd->pitch; + vfbd->base.base.depth = mode_cmd->depth; + vfbd->base.base.width = mode_cmd->width; + vfbd->base.base.height = mode_cmd->height; vfbd->base.pin = vmw_framebuffer_dmabuf_pin; vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; vfbd->buffer = dmabuf; @@ -719,8 +838,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_framebuffer *vfb = NULL; struct vmw_surface *surface = NULL; struct vmw_dma_buffer *bo = NULL; + u64 required_size; int ret; + /** + * This code should be conditioned on Screen Objects not being used. + * If screen objects are used, we can allocate a GMR to hold the + * requested framebuffer. + */ + + required_size = mode_cmd->pitch * mode_cmd->height; + if (unlikely(required_size > (u64) dev_priv->vram_size)) { + DRM_ERROR("VRAM size is too small for requested mode.\n"); + return NULL; + } + + /** + * End conditioned code. + */ + ret = vmw_user_surface_lookup_handle(dev_priv, tfile, mode_cmd->handle, &surface); if (ret) @@ -729,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, if (!surface->scanout) goto err_not_scanout; - ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, - mode_cmd->width, mode_cmd->height); + ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface, + &vfb, mode_cmd); /* vmw_user_surface_lookup takes one ref so does new_fb */ vmw_surface_unreference(&surface); @@ -751,7 +887,7 @@ try_dmabuf: } ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, - mode_cmd->width, mode_cmd->height); + mode_cmd); /* vmw_user_dmabuf_lookup takes one ref so does new_fb */ vmw_dmabuf_unreference(&bo); @@ -889,6 +1025,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) vmw_priv->num_displays = vmw_read(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS); + if (vmw_priv->num_displays == 0) + vmw_priv->num_displays = 1; + for (i = 0; i < vmw_priv->num_displays; ++i) { save = &vmw_priv->vga_save[i]; vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); @@ -997,6 +1136,13 @@ out_unlock: return ret; } +bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, + uint32_t pitch, + uint32_t height) +{ + return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; +} + u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) { return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 11cb39e3accb..a01c47ddb5bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, { struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector); struct drm_device *dev = connector->dev; + struct vmw_private *dev_priv = vmw_priv(dev); struct drm_display_mode *mode = NULL; + struct drm_display_mode *bmode; struct 
drm_display_mode prefmode = { DRM_MODE("preferred", DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -443,22 +445,30 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, mode->hdisplay = ldu->pref_width; mode->vdisplay = ldu->pref_height; mode->vrefresh = drm_mode_vrefresh(mode); - drm_mode_probed_add(connector, mode); + if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, + mode->vdisplay)) { + drm_mode_probed_add(connector, mode); - if (ldu->pref_mode) { - list_del_init(&ldu->pref_mode->head); - drm_mode_destroy(dev, ldu->pref_mode); + if (ldu->pref_mode) { + list_del_init(&ldu->pref_mode->head); + drm_mode_destroy(dev, ldu->pref_mode); + } + + ldu->pref_mode = mode; } - - ldu->pref_mode = mode; } for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { - if (vmw_ldu_connector_builtin[i].hdisplay > max_width || - vmw_ldu_connector_builtin[i].vdisplay > max_height) + bmode = &vmw_ldu_connector_builtin[i]; + if (bmode->hdisplay > max_width || + bmode->vdisplay > max_height) continue; - mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]); + if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, + bmode->vdisplay)) + continue; + + mode = drm_mode_duplicate(dev, bmode); if (!mode) return 0; mode->vrefresh = drm_mode_vrefresh(mode); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c8c40e9979db..36e129f0023f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob, return bo_user_size + page_array_size; } -void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo) -{ - struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); - struct ttm_bo_global *glob = bo->glob; - struct vmw_private *dev_priv = - container_of(bo->bdev, struct vmw_private, bdev); - - if (vmw_bo->gmr_bound) { - vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); - spin_lock(&glob->lru_lock); - ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); - spin_unlock(&glob->lru_lock); - vmw_bo->gmr_bound = false; - } -} - void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) { struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); struct ttm_bo_global *glob = bo->glob; - vmw_dmabuf_gmr_unbind(bo); ttm_mem_global_free(glob->mem_glob, bo->acc_size); kfree(vmw_bo); } @@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, memset(vmw_bo, 0, sizeof(*vmw_bo)); - INIT_LIST_HEAD(&vmw_bo->gmr_lru); INIT_LIST_HEAD(&vmw_bo->validate_list); - vmw_bo->gmr_id = 0; - vmw_bo->gmr_bound = false; ret = ttm_bo_init(bdev, &vmw_bo->base, size, ttm_bo_type_device, placement, @@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); struct ttm_bo_global *glob = bo->glob; - vmw_dmabuf_gmr_unbind(bo); ttm_mem_global_free(glob->mem_glob, bo->acc_size); kfree(vmw_user_bo); } @@ -938,25 +917,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) vmw_bo->on_validate_list = false; } -uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo) -{ - struct vmw_dma_buffer *vmw_bo; - - if (bo->mem.mem_type == TTM_PL_VRAM) - return SVGA_GMR_FRAMEBUFFER; - - vmw_bo = vmw_dma_buffer(bo); - - return (vmw_bo->gmr_bound) ? 
vmw_bo->gmr_id : SVGA_GMR_NULL;
-}
-
-void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
-{
-	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-	vmw_bo->gmr_bound = true;
-	vmw_bo->gmr_id = id;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 			   uint32_t handle, struct vmw_dma_buffer **out)
 {
@@ -985,41 +945,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	return 0;
 }
 
-/**
- * TODO: Implement a gmr id eviction mechanism. Currently we just fail
- * when we're out of ids, causing GMR space to be allocated
- * out of VRAM.
- */
-
-int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
-{
-	struct ttm_bo_global *glob = dev_priv->bdev.glob;
-	int id;
-	int ret;
-
-	do {
-		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
-			return -ENOMEM;
-
-		spin_lock(&glob->lru_lock);
-		ret = ida_get_new(&dev_priv->gmr_ida, &id);
-		spin_unlock(&glob->lru_lock);
-	} while (ret == -EAGAIN);
-
-	if (unlikely(ret != 0))
-		return ret;
-
-	if (unlikely(id >= dev_priv->max_gmr_ids)) {
-		spin_lock(&glob->lru_lock);
-		ida_remove(&dev_priv->gmr_ida, id);
-		spin_unlock(&glob->lru_lock);
-		return -EBUSY;
-	}
-
-	*p_id = (uint32_t) id;
-	return 0;
-}
-
 /*
  * Stream management
  */
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
new file mode 100644
index 000000000000..742c423567cf
--- /dev/null
+++ b/drivers/gpu/stub/Kconfig
@@ -0,0 +1,13 @@
+config STUB_POULSBO
+	tristate "Intel GMA500 Stub Driver"
+	depends on PCI
+	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select ACPI_VIDEO if ACPI
+	help
+	  Choose this option if you have a system that has Intel GMA500
+	  (Poulsbo) integrated graphics. If M is selected, the module will
+	  be called poulsbo. This is a stub driver for Poulsbo that
+	  registers the ACPI video driver, providing the ACPI backlight
+	  control sysfs entry, since no native Poulsbo driver with Intel
+	  opregion support exists yet.
diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile
new file mode 100644
index 000000000000..cd940cc9d36d
--- /dev/null
+++ b/drivers/gpu/stub/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_STUB_POULSBO) += poulsbo.o
diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c
new file mode 100644
index 000000000000..7edfd27b8dee
--- /dev/null
+++ b/drivers/gpu/stub/poulsbo.c
@@ -0,0 +1,64 @@
+/*
+ * Intel Poulsbo Stub driver
+ *
+ * Copyright (C) 2010 Novell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ * + */ + +#include +#include +#include +#include + +#define DRIVER_NAME "poulsbo" + +enum { + CHIP_PSB_8108 = 0, + CHIP_PSB_8109 = 1, +}; + +static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { + {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \ + {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \ + {0, 0, 0} +}; + +static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return acpi_video_register(); +} + +static void poulsbo_remove(struct pci_dev *pdev) +{ + acpi_video_unregister(); +} + +static struct pci_driver poulsbo_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = poulsbo_probe, + .remove = poulsbo_remove, +}; + +static int __init poulsbo_init(void) +{ + return pci_register_driver(&poulsbo_driver); +} + +static void __exit poulsbo_exit(void) +{ + pci_unregister_driver(&poulsbo_driver); +} + +module_init(poulsbo_init); +module_exit(poulsbo_exit); + +MODULE_AUTHOR("Lee, Chun-Yi "); +MODULE_DESCRIPTION("Poulsbo Stub Driver"); +MODULE_LICENSE("GPL"); + +MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 596ef6b922bf..27c1fb4b1e0d 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -17,6 +17,8 @@ source "drivers/gpu/vga/Kconfig" source "drivers/gpu/drm/Kconfig" +source "drivers/gpu/stub/Kconfig" + config VGASTATE tristate default n diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 4c9461a4f9e6..274eaaa15c36 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -699,13 +699,8 @@ struct drm_driver { int (*suspend) (struct drm_device *, pm_message_t state); int (*resume) (struct drm_device *); int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); - void (*dma_ready) (struct drm_device *); int (*dma_quiescent) (struct drm_device *); - int (*context_ctor) (struct drm_device *dev, int context); int (*context_dtor) (struct drm_device *dev, int context); - int (*kernel_context_switch) (struct drm_device *dev, int old, - int new); - void (*kernel_context_switch_unlock) (struct drm_device *dev); /** * get_vblank_counter - get raw hardware vblank counter @@ -777,8 +772,6 @@ struct drm_driver { struct drm_file *file_priv); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, struct drm_file *file_priv); - resource_size_t (*get_map_ofs) (struct drm_local_map * map); - resource_size_t (*get_reg_ofs) (struct drm_device *dev); void (*set_version) (struct drm_device *dev, struct drm_set_version *sv); @@ -795,8 +788,6 @@ struct drm_driver { void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv, bool from_release); - int (*proc_init)(struct drm_minor *minor); - void (*proc_cleanup)(struct drm_minor *minor); int (*debugfs_init)(struct drm_minor *minor); void (*debugfs_cleanup)(struct drm_minor *minor); @@ -972,7 +963,6 @@ struct drm_device { __volatile__ long context_flag; /**< Context swapping flag */ __volatile__ long interrupt_flag; /**< Interruption handler flag */ __volatile__ long dma_flag; /**< DMA dispatch flag */ - struct timer_list timer; /**< Timer for delaying ctx switch */ wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */ int last_checked; /**< Last context checked for DMA */ int last_context; /**< Last current context */ @@ -1045,25 +1035,12 @@ struct drm_device { struct drm_minor *control; /**< Control node for card */ struct drm_minor *primary; /**< render type primary screen head */ - /** \name Drawable information */ - /*@{ */ - spinlock_t drw_lock; - struct idr 
drw_idr; - /*@} */ - struct drm_mode_config mode_config; /**< Current mode config */ /** \name GEM information */ /*@{ */ spinlock_t object_name_lock; struct idr object_name_idr; - atomic_t object_count; - atomic_t object_memory; - atomic_t pin_count; - atomic_t pin_memory; - atomic_t gtt_count; - atomic_t gtt_memory; - uint32_t gtt_total; uint32_t invalidate_domains; /* domains pending invalidation */ uint32_t flush_domains; /* domains pending flush */ /*@} */ @@ -1175,8 +1152,6 @@ extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); extern void drm_vm_open_locked(struct vm_area_struct *vma); extern void drm_vm_close_locked(struct vm_area_struct *vma); -extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); -extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Memory management support (drm_memory.h) */ @@ -1186,8 +1161,7 @@ extern int drm_mem_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); -extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); -extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); +extern void drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, struct page **pages, @@ -1239,17 +1213,6 @@ extern int drm_setsareactx(struct drm_device *dev, void *data, extern int drm_getsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv); - /* Drawable IOCTL support (drm_drawable.h) */ -extern int drm_adddraw(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_rmdraw(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int drm_update_drawable_info(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, - drm_drawable_t id); -extern void drm_drawable_free_all(struct drm_device *dev); - /* Authentication IOCTL support (drm_auth.h) */ extern int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1264,7 +1227,6 @@ extern int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); extern void drm_idlelock_take(struct drm_lock_data *lock_data); extern void drm_idlelock_release(struct drm_lock_data *lock_data); @@ -1359,10 +1321,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type); -extern int drm_agp_free_memory(DRM_AGP_MEM * handle); -extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); -extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); extern void drm_agp_chipset_flush(struct drm_device *dev); /* Stub support (drm_stub.h) */ @@ -1414,7 +1372,6 @@ extern int 
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3e5a51af757c..029aa688e787 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -221,7 +221,8 @@ struct drm_framebuffer_funcs {
 	 * the semantics and arguments have a one to one mapping
 	 * on this function.
 	 */
-	int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
+	int (*dirty)(struct drm_framebuffer *framebuffer,
+		     struct drm_file *file_priv, unsigned flags,
 		     unsigned color, struct drm_clip_rect *clips,
 		     unsigned num_clips);
 };
@@ -762,6 +763,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 				    void *data, struct drm_file *file_priv);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern bool drm_detect_monitor_audio(struct edid *edid);
 extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
 				    void *data, struct drm_file *file_priv);
 extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 59b7073b13fe..73b071203dcc 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -39,6 +39,11 @@
 #include <linux/fb.h>
 
+enum mode_set_atomic {
+	LEAVE_ATOMIC_MODE_SET,
+	ENTER_ATOMIC_MODE_SET,
+};
+
 struct drm_crtc_helper_funcs {
 	/*
 	 * Control power levels on the CRTC.  If the mode passed in is
@@ -61,7 +66,8 @@ struct drm_crtc_helper_funcs {
 	int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
 			     struct drm_framebuffer *old_fb);
 	int (*mode_set_base_atomic)(struct drm_crtc *crtc,
-				    struct drm_framebuffer *fb, int x, int y);
+				    struct drm_framebuffer *fb, int x, int y,
+				    enum mode_set_atomic);
 
 	/* reload the current crtc LUT */
 	void (*load_lut)(struct drm_crtc *crtc);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index a49e791db0b0..83a389e44543 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -23,6 +23,9 @@
 #ifndef _DRM_DP_HELPER_H_
 #define _DRM_DP_HELPER_H_
 
+#include <linux/types.h>
+#include <linux/i2c.h>
+
 /* From the VESA DisplayPort spec */
 
 #define AUX_NATIVE_WRITE	0x8
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index e41c74facb6a..8c641bed9bbd 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -286,6 +286,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_PAGEFLIPPING	 8
 #define I915_PARAM_HAS_EXECBUF2		 9
 #define I915_PARAM_HAS_BSD		 10
+#define I915_PARAM_HAS_BLT		 11
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -627,8 +628,11 @@ struct drm_i915_gem_execbuffer2 {
 	__u32 num_cliprects;
 	/** This is a struct drm_clip_rect *cliprects */
 	__u64 cliprects_ptr;
+#define I915_EXEC_RING_MASK	(7<<0)
+#define I915_EXEC_DEFAULT	(0<<0)
 #define I915_EXEC_RENDER	(1<<0)
-#define I915_EXEC_BSD		(1<<1)
+#define I915_EXEC_BSD		(2<<0)
+#define I915_EXEC_BLT		(3<<0)
 	__u64 flags;
 	__u64 rsvd1;
 	__u64 rsvd2;
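The execbuffer2 change above is subtle: I915_EXEC_BSD stops being an independent flag bit and the low three bits of flags become a ring-select field, making room for the new BLT ring. A userspace-side sketch (assuming an open DRM fd and libdrm's drmIoctl; the exec_on_blt helper is illustrative, not part of any library):

#include <string.h>
#include <xf86drm.h>	/* drmIoctl(), from libdrm */
#include <i915_drm.h>	/* ioctl numbers and flag definitions */

static int exec_on_blt(int fd, struct drm_i915_gem_execbuffer2 *eb)
{
	int has_blt = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_BLT,
		.value = &has_blt,
	};

	/* Probe the new getparam first; older kernels reject it. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || !has_blt)
		return -1;	/* no BLT ring available */

	/* Rings are a field now, not bits: clear the mask, then set. */
	eb->flags = (eb->flags & ~I915_EXEC_RING_MASK) | I915_EXEC_BLT;
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, eb);
}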
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
new file mode 100644
index 000000000000..d3c81946f613
--- /dev/null
+++ b/include/drm/intel-gtt.h
@@ -0,0 +1,18 @@
+/* Common header for intel-gtt.ko and i915.ko */
+
+#ifndef _DRM_INTEL_GTT_H
+#define _DRM_INTEL_GTT_H
+struct intel_gtt {
+	/* Number of stolen gtt entries at the beginning. */
+	unsigned int gtt_stolen_entries;
+	/* Total number of gtt entries. */
+	unsigned int gtt_total_entries;
+	/* Part of the gtt that is mappable by the cpu, for those chips where
+	 * this is not the full gtt. */
+	unsigned int gtt_mappable_entries;
+};
+
+struct intel_gtt *intel_gtt_get(void);
+
+#endif
+
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2040e6c4f172..5afa5b52063e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -102,7 +102,8 @@ struct ttm_bus_placement {
  */
 
 struct ttm_mem_reg {
-	struct drm_mm_node *mm_node;
+	void *mm_node;
+	unsigned long start;
 	unsigned long size;
 	unsigned long num_pages;
 	uint32_t page_alignment;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index b87504235f18..d01b4ddbdc56 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -203,7 +203,22 @@ struct ttm_tt {
  * It's set up by the ttm_bo_driver::init_mem_type method.
  */
 
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+	int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+	int (*takedown)(struct ttm_mem_type_manager *man);
+	int (*get_node)(struct ttm_mem_type_manager *man,
+			struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			struct ttm_mem_reg *mem);
+	void (*put_node)(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem);
+	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
 struct ttm_mem_type_manager {
+	struct ttm_bo_device *bdev;
 
 	/*
 	 * No protection. Constant from start.
@@ -222,8 +237,8 @@ struct ttm_mem_type_manager {
 	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
 	 * Plays ill with list removal, though.
 	 */
-
-	struct drm_mm manager;
+	const struct ttm_mem_type_manager_func *func;
+	void *priv;
 	struct list_head lru;
 };
@@ -649,6 +664,12 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				struct ttm_mem_reg *mem,
 				bool interruptible,
 				bool no_wait_reserve, bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem);
+extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
+
 /**
  * ttm_bo_wait_for_cpu
  *
@@ -891,6 +912,8 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  */
 extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
 
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
 #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define TTM_HAS_AGP
 #include <agp_backend.h>
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 4d0842391edc..650e6bf6f69f 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -72,6 +72,7 @@
 #define DRM_VMW_PARAM_FIFO_OFFSET      3
 #define DRM_VMW_PARAM_HW_CAPS          4
 #define DRM_VMW_PARAM_FIFO_CAPS        5
+#define DRM_VMW_PARAM_MAX_FB_SIZE      6
 
 /**
  * struct drm_vmw_getparam_arg
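The new intel-gtt.h lets i915 query GTT geometry from the common intel-gtt code instead of re-probing the chipset. A sketch of how a client might consume it — each entry is one 4 KiB GTT PTE, hence the PAGE_SHIFT conversion; the helper name is hypothetical:

#include <linux/mm.h>		/* PAGE_SHIFT */
#include <drm/intel-gtt.h>

static unsigned long mydrv_mappable_aperture_bytes(void)
{
	struct intel_gtt *gtt = intel_gtt_get();

	/* Stolen entries occupy the start of the GTT and are reserved,
	 * so only the remainder of the CPU-mappable range is usable. */
	return (unsigned long)(gtt->gtt_mappable_entries -
			       gtt->gtt_stolen_entries) << PAGE_SHIFT;
}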
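The ttm_bo_driver.h hunks make the per-memory-type range manager pluggable: ttm_mem_type_manager no longer embeds a drm_mm directly, and a driver either points man->func at the exported stock manager (ttm_bo_manager_func, drm_mm-backed) or supplies its own table. A skeleton under that assumption — every mydrv_* symbol is hypothetical:

/* Forward declarations of the driver's hypothetical callbacks. */
static int mydrv_man_init(struct ttm_mem_type_manager *man,
			  unsigned long p_size);		/* set up man->priv */
static int mydrv_man_takedown(struct ttm_mem_type_manager *man);
static int mydrv_man_get_node(struct ttm_mem_type_manager *man,
			      struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_mem_reg *mem);	/* fill mem->mm_node, mem->start */
static void mydrv_man_put_node(struct ttm_mem_type_manager *man,
			       struct ttm_mem_reg *mem);	/* release mem->mm_node */
static void mydrv_man_debug(struct ttm_mem_type_manager *man,
			    const char *prefix);

static const struct ttm_mem_type_manager_func mydrv_manager_func = {
	.init     = mydrv_man_init,
	.takedown = mydrv_man_takedown,
	.get_node = mydrv_man_get_node,
	.put_node = mydrv_man_put_node,
	.debug    = mydrv_man_debug,
};

/* In the driver's init_mem_type hook, pick a manager per memory type:
 *	man->func = &ttm_bo_manager_func;   -- stock drm_mm manager
 *	man->func = &mydrv_manager_func;    -- driver-private allocator
 */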