/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
# include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
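
/*
 * Locate the physical base address of the stolen region. Returns 0 if
 * the base cannot be determined, or if the region conflicts with another
 * reserved resource, in which case stolen memory is not used at all.
 */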
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct resource *r;
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at offset 0x5c in the igfx configuration space. On a few (desktop)
         * machines this is also mirrored in the bridge device at different
         * locations, or in the MCHBAR. On gen2, the layout is again slightly
         * different with the Graphics Segment immediately following Top of
         * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
         * reported by 865g, so we just use the top of memory as determined
         * by the e820 probe.
         *
         * XXX However gen2 requires an unavailable symbol.
         */
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
                /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1 << 20) - 1);
        } else { /* GEN2 */
#if 0
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
        }

        if (base == 0)
                return 0;

        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel, so if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                          base, base + (uint32_t)dev_priv->gtt.stolen_size);
                base = 0;
        }

        return base;
}
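
/*
 * Reserve the compressed framebuffer (and, on hardware without a
 * dedicated CB base register, the compressed line buffer) for FBC from
 * stolen memory and program the hardware with its location. We first try
 * to over-allocate (twice the requested size) to reduce reallocations and
 * fragmentation, then fall back to the requested size.
 */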
static int i915_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);

        /* Try to over-allocate to reduce reallocations and fragmentation */
        compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
                                           size <<= 1, 4096, 0);
        if (!compressed_fb)
                compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
                                                   size >>= 1, 4096, 0);
        if (compressed_fb)
                compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
        if (!compressed_fb)
                goto err;

        if (HAS_PCH_SPLIT(dev))
                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
        else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
                compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
                                                    4096, 4096, 0);
                if (compressed_llb)
                        compressed_llb = drm_mm_get_block(compressed_llb,
                                                          4096, 4096);
                if (!compressed_llb)
                        goto err_fb;

                dev_priv->fbc.compressed_llb = compressed_llb;

                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + compressed_fb->start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        dev_priv->fbc.compressed_fb = compressed_fb;
        dev_priv->fbc.size = size;

        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);

        return 0;

err_fb:
        drm_mm_put_block(compressed_fb);
err:
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
}
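
/*
 * Request a compressed framebuffer of at least @size bytes. Returns
 * -ENODEV if the stolen allocator has not been initialised, 0 if the
 * current allocation already exceeds the requested size, and otherwise
 * releases the current block and allocates a new one.
 */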
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        if (size < dev_priv->fbc.size)
                return 0;

        /* Release any current block */
        i915_gem_stolen_cleanup_compression(dev);

        return i915_setup_compression(dev, size);
}
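
/*
 * Return the compressed framebuffer and line buffer blocks, if any were
 * allocated, back to the stolen allocator.
 */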
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->fbc.size == 0)
                return;

        if (dev_priv->fbc.compressed_fb)
                drm_mm_put_block(dev_priv->fbc.compressed_fb);

        if (dev_priv->fbc.compressed_llb)
                drm_mm_put_block(dev_priv->fbc.compressed_llb);

        dev_priv->fbc.size = 0;
}
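
/*
 * Tear down the stolen memory allocator on driver unload: drop the FBC
 * allocations and take down the drm_mm range manager.
 */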
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        i915_gem_stolen_cleanup_compression(dev);
        drm_mm_takedown(&dev_priv->mm.stolen);
}
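
/*
 * Initialise the stolen memory allocator: find the physical base of the
 * stolen region and set up a simple drm_mm range manager over it, minus
 * any portion reserved for the BIOS. Returns 0 even when no stolen
 * memory is usable; callers check drm_mm_initialized() instead.
 */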
int i915_gem_init_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int bios_reserved = 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
                      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

        if (IS_VALLEYVIEW(dev))
                bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

        if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
                return 0;

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);

        return 0;
}
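
/*
 * Build a single-entry sg_table describing a range of stolen memory.
 * Stolen memory has no struct pages, so the scatterlist carries only the
 * DMA (physical) address of the contiguous allocation.
 */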
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > dev_priv->gtt.stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = offset;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}
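
/*
 * Stolen objects are created with their backing store already in place,
 * so get_pages should never be reached; put_pages only tears down the
 * sg_table built above when the object is freed.
 */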
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
        /* Should only be called during free */
        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
};
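
/*
 * Wrap a reserved stolen drm_mm_node in a GEM object: allocate the
 * object, attach the fake sg_table, pin its pages and mark it as living
 * in the GTT domain with no CPU caching.
 */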
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
                goto cleanup;

        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->pages = i915_pages_create_for_stolen(dev,
                                                  stolen->start, stolen->size);
        if (obj->pages == NULL)
                goto cleanup;

        obj->has_dma_mapping = true;
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;

        obj->base.write_domain = I915_GEM_DOMAIN_GTT;
        obj->base.read_domains = I915_GEM_DOMAIN_GTT;
        obj->cache_level = I915_CACHE_NONE;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}
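
/*
 * Allocate a new GEM object backed by stolen memory. The space is carved
 * out of the stolen drm_mm with 4KiB alignment; returns NULL if stolen
 * memory is unavailable or the allocation fails.
 */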
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
        if (stolen)
                stolen = drm_mm_get_block(stolen, size, 4096);
        if (stolen == NULL)
                return NULL;

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        drm_mm_put_block(stolen);
        return NULL;
}
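
/*
 * Create a GEM object around a region of stolen memory that was already
 * set up before the driver loaded (e.g. the BIOS framebuffer). The caller
 * supplies the stolen offset and, optionally, the GTT offset at which the
 * region is currently mapped; pass I915_GTT_OFFSET_NONE for objects that
 * only need the physical backing.
 */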
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                      stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        BUG_ON(stolen_offset & 4095);
        BUG_ON(size & 4095);

        if (WARN_ON(size == 0))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                drm_mm_put_block(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        vma = i915_gem_vma_create(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_out;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
        if (drm_mm_initialized(&ggtt->mm)) {
                ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        i915_gem_vma_destroy(vma);
                        goto err_out;
                }
        }

        obj->has_global_gtt_mapping = 1;

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &ggtt->inactive_list);

        return obj;

err_out:
        drm_mm_put_block(stolen);
        drm_gem_object_unreference(&obj->base);
        return NULL;
}
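
/*
 * Called when a stolen-backed GEM object is freed: return its block to
 * the stolen allocator.
 */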
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        if (obj->stolen) {
                drm_mm_put_block(obj->stolen);
                obj->stolen = NULL;
        }
}