Merge branch 's3c-fixes-rc4' of git://aeryn.fluff.org.uk/bjdooks/linux

commit 0a52ac8d18
Russell King, 2009-07-30 10:47:55 +01:00 (committed by Russell King)
300 changed files with 10088 additions and 5928 deletions


@ -23,7 +23,8 @@ interface.
Using sysfs
~~~~~~~~~~~
sysfs is always compiled in. You can access it by doing:
sysfs is always compiled in if CONFIG_SYSFS is defined. You can access
it by doing:
mount -t sysfs sysfs /sys


@ -101,6 +101,8 @@ card*/pcm*/xrun_debug
bit 0 = Enable XRUN/jiffies debug messages
bit 1 = Show stack trace at XRUN / jiffies check
bit 2 = Enable additional jiffies check
bit 3 = Log hwptr update at each period interrupt
bit 4 = Log hwptr update at each snd_pcm_update_hw_ptr()
When bit 0 is set, the driver will write messages to the
kernel log when an xrun is detected. The debug message is
@ -117,6 +119,9 @@ card*/pcm*/xrun_debug
buggy) hardware that doesn't give smooth pointer updates.
This feature is enabled via bit 2.
Bits 3 and 4 are for logging the hwptr records. Note that
these will give a flood of kernel messages.
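
A minimal userspace sketch of driving this interface follows; the proc
path (card 0, playback substream 0) is hypothetical and depends on the
installed hardware:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/asound/card0/pcm0p/xrun_debug", "w");

	if (!f) {
		perror("xrun_debug");
		return 1;
	}
	fprintf(f, "%d\n", (1 << 0) | (1 << 2)); /* messages + jiffies check */
	fclose(f);
	return 0;
}
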
card*/pcm*/sub*/info
The general information of this PCM sub-stream.


@ -20,7 +20,7 @@
19 -> EM2860/SAA711X Reference Design (em2860)
20 -> AMD ATI TV Wonder HD 600 (em2880) [0438:b002]
21 -> eMPIA Technology, Inc. GrabBeeX+ Video Encoder (em2800) [eb1a:2801]
22 -> Unknown EM2750/EM2751 webcam grabber (em2750) [eb1a:2750,eb1a:2751]
22 -> EM2710/EM2750/EM2751 webcam grabber (em2750) [eb1a:2750,eb1a:2751]
23 -> Huaqi DLCW-130 (em2750)
24 -> D-Link DUB-T210 TV Tuner (em2820/em2840) [2001:f112]
25 -> Gadmei UTV310 (em2820/em2840)


@ -44,7 +44,9 @@ zc3xx 0458:7007 Genius VideoCam V2
zc3xx 0458:700c Genius VideoCam V3
zc3xx 0458:700f Genius VideoCam Web V2
sonixj 0458:7025 Genius Eye 311Q
sn9c20x 0458:7029 Genius Look 320s
sonixj 0458:702e Genius Slim 310 NB
sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
sonixj 045e:00f5 MicroSoft VX3000
sonixj 045e:00f7 MicroSoft VX1000
ov519 045e:028c Micro$oft xbox cam
@ -282,6 +284,28 @@ sonixj 0c45:613a Microdia Sonix PC Camera
sonixj 0c45:613b Surfer SN-206
sonixj 0c45:613c Sonix Pccam168
sonixj 0c45:6143 Sonix Pccam168
sn9c20x 0c45:6240 PC Camera (SN9C201 + MT9M001)
sn9c20x 0c45:6242 PC Camera (SN9C201 + MT9M111)
sn9c20x 0c45:6248 PC Camera (SN9C201 + OV9655)
sn9c20x 0c45:624e PC Camera (SN9C201 + SOI968)
sn9c20x 0c45:624f PC Camera (SN9C201 + OV9650)
sn9c20x 0c45:6251 PC Camera (SN9C201 + OV9650)
sn9c20x 0c45:6253 PC Camera (SN9C201 + OV9650)
sn9c20x 0c45:6260 PC Camera (SN9C201 + OV7670)
sn9c20x 0c45:6270 PC Camera (SN9C201 + MT9V011/MT9V111/MT9V112)
sn9c20x 0c45:627b PC Camera (SN9C201 + OV7660)
sn9c20x 0c45:627c PC Camera (SN9C201 + HV7131R)
sn9c20x 0c45:627f PC Camera (SN9C201 + OV9650)
sn9c20x 0c45:6280 PC Camera (SN9C202 + MT9M001)
sn9c20x 0c45:6282 PC Camera (SN9C202 + MT9M111)
sn9c20x 0c45:6288 PC Camera (SN9C202 + OV9655)
sn9c20x 0c45:628e PC Camera (SN9C202 + SOI968)
sn9c20x 0c45:628f PC Camera (SN9C202 + OV9650)
sn9c20x 0c45:62a0 PC Camera (SN9C202 + OV7670)
sn9c20x 0c45:62b0 PC Camera (SN9C202 + MT9V011/MT9V111/MT9V112)
sn9c20x 0c45:62b3 PC Camera (SN9C202 + OV9655)
sn9c20x 0c45:62bb PC Camera (SN9C202 + OV7660)
sn9c20x 0c45:62bc PC Camera (SN9C202 + HV7131R)
sunplus 0d64:0303 Sunplus FashionCam DXG
etoms 102c:6151 Qcam Sangha CIF
etoms 102c:6251 Qcam xxxxxx VGA
@ -290,6 +314,7 @@ spca561 10fd:7e50 FlyCam Usb 100
zc3xx 10fd:8050 Typhoon Webshot II USB 300k
ov534 1415:2000 Sony HD Eye for PS3 (SLEH 00201)
pac207 145f:013a Trust WB-1300N
sn9c20x 145f:013d Trust WB-3600R
vc032x 15b8:6001 HP 2.0 Megapixel
vc032x 15b8:6002 HP 2.0 Megapixel rz406aa
spca501 1776:501c Arowana 300K CMOS Camera
@ -300,4 +325,11 @@ spca500 2899:012c Toptro Industrial
spca508 8086:0110 Intel Easy PC Camera
spca500 8086:0630 Intel Pocket PC Camera
spca506 99fa:8988 Grandtec V.cap
sn9c20x a168:0610 Dino-Lite Digital Microscope (SN9C201 + HV7131R)
sn9c20x a168:0611 Dino-Lite Digital Microscope (SN9C201 + HV7131R)
sn9c20x a168:0613 Dino-Lite Digital Microscope (SN9C201 + HV7131R)
sn9c20x a168:0618 Dino-Lite Digital Microscope (SN9C201 + HV7131R)
sn9c20x a168:0614 Dino-Lite Digital Microscope (SN9C201 + MT9M111)
sn9c20x a168:0615 Dino-Lite Digital Microscope (SN9C201 + MT9M111)
sn9c20x a168:0617 Dino-Lite Digital Microscope (SN9C201 + MT9M111)
spca561 abcd:cdee Petcam


@ -9,7 +9,7 @@
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
#endif
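
This hunk, and the long series of matching hunks below, is the central
API change merged here: each architecture's pte/pmd/pud_free_tlb() hook
now also receives the virtual address of the mapping being torn down;
this particular arch just forwards to pte_free()/pmd_free() and ignores
it. A standalone userspace mock of the new three-argument shape, with
the kernel types replaced by stand-ins:

#include <stdio.h>

struct mmu_gather { int need_flush; };	/* stand-in, not the kernel type */
typedef unsigned long pte_t;		/* stand-in */

/* arch hook: now told which virtual address the table covered */
static void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
			   unsigned long address)
{
	printf("free pte table %p for address %#lx\n", (void *)pte, address);
}

#define pte_free_tlb(tlb, ptep, address)		\
	do {						\
		(tlb)->need_flush = 1;			\
		__pte_free_tlb(tlb, ptep, address);	\
	} while (0)

int main(void)
{
	struct mmu_gather tlb = { 0 };
	pte_t table;

	pte_free_tlb(&tlb, &table, 0x40000000UL);
	return 0;
}
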


@ -102,8 +102,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
}
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm) do { } while (0)


@ -28,7 +28,7 @@ static inline struct s3c_gpio_chip *s3c_gpiolib_getchip(unsigned int pin)
return NULL;
chip = &s3c24xx_gpios[pin/32];
return (S3C2410_GPIO_OFFSET(pin) > chip->chip.ngpio) ? chip : NULL;
return (S3C2410_GPIO_OFFSET(pin) < chip->chip.ngpio) ? chip : NULL;
}
#endif /* __ASM_ARCH_GPIO_CORE_H */


@ -153,7 +153,7 @@ static unsigned long s3c64xx_clk_arm_round_rate(struct clk *clk,
u32 div;
if (parent < rate)
return rate;
return parent;
div = (parent / rate) - 1;
if (div > armclk_mask)
@ -175,7 +175,7 @@ static int s3c64xx_clk_arm_set_rate(struct clk *clk, unsigned long rate)
div = clk_get_rate(clk->parent) / rate;
val = __raw_readl(S3C_CLK_DIV0);
val &= armclk_mask;
val &= ~armclk_mask;
val |= (div - 1);
__raw_writel(val, S3C_CLK_DIV0);
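
The set_rate change is a read-modify-write fix: val &= armclk_mask kept
only the divider field and zeroed every other bit of S3C_CLK_DIV0,
while the corrected val &= ~armclk_mask clears just the field about to
be rewritten. A self-contained sketch with made-up mask and register
values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t armclk_mask = 0xf;	/* divider field; width assumed */
	uint32_t val = 0xabcd0007;	/* register with other live bits */
	uint32_t div = 4;

	uint32_t bad  = (val & armclk_mask)  | (div - 1); /* wipes 0xabcd0000 */
	uint32_t good = (val & ~armclk_mask) | (div - 1); /* preserves it */

	printf("bad = %#010x, good = %#010x\n", bad, good);
	return 0;
}
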


@ -83,7 +83,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
quicklist_free_page(QUICK_PT, NULL, pte);
}
#define __pte_free_tlb(tlb,pte) \
#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \


@ -47,7 +47,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
#define __pte_free_tlb(tlb,pte) \
#define __pte_free_tlb(tlb,pte,address) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \


@ -49,7 +49,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
#define __pte_free_tlb(tlb,pte) \
#define __pte_free_tlb(tlb,pte,address) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb),(pte)); \
@ -62,7 +62,7 @@ do { \
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); })
#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#define __pmd_free_tlb(tlb,x,a) do { } while (0)
#endif /* CONFIG_MMU */


@ -225,7 +225,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
*/
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
#define __pud_free_tlb(tlb, x, address) do { } while (0)
/*
* The "pud_xxx()" functions here are trivial for a folded two-level


@ -48,7 +48,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
quicklist_free(0, NULL, pud);
}
#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud)
#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_4 */
static inline void
@ -67,7 +67,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
quicklist_free(0, NULL, pmd);
}
#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
@ -117,6 +117,6 @@ static inline void check_pgt_cache(void)
quicklist_trim(0, NULL, 25, 16);
}
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
#endif /* _ASM_IA64_PGALLOC_H */


@ -236,22 +236,22 @@ do { \
__tlb_remove_tlb_entry(tlb, ptep, addr); \
} while (0)
#define pte_free_tlb(tlb, ptep) \
#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
__pte_free_tlb(tlb, ptep); \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#define pmd_free_tlb(tlb, ptep) \
#define pmd_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
__pmd_free_tlb(tlb, ptep); \
__pmd_free_tlb(tlb, ptep, address); \
} while (0)
#define pud_free_tlb(tlb, pudp) \
#define pud_free_tlb(tlb, pudp, address) \
do { \
tlb->need_flush = 1; \
__pud_free_tlb(tlb, pudp); \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif /* _ASM_IA64_TLB_H */


@ -58,7 +58,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
@ -68,7 +68,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#define check_pgt_cache() do { } while (0)


@ -54,7 +54,8 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
__free_page(page);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
pgtable_page_dtor(page);
cache_page(kmap(page));
@ -73,7 +74,8 @@ static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
return free_pointer_table(pmd);
}
static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
return free_pointer_table(pmd);
}


@ -32,7 +32,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
__free_page(page);
}
#define __pte_free_tlb(tlb,pte) \
#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
@ -80,7 +80,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
* inside the pgd, so has no extra memory associated with it.
*/
#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{


@ -6,14 +6,16 @@ endif
# What CPU version are we building for, and crack it open
# as major.minor.rev
CPU_VER=$(subst ",,$(CONFIG_XILINX_MICROBLAZE0_HW_VER) )
CPU_MAJOR=$(shell echo $(CPU_VER) | cut -d '.' -f 1)
CPU_MINOR=$(shell echo $(CPU_VER) | cut -d '.' -f 2)
CPU_REV=$(shell echo $(CPU_VER) | cut -d '.' -f 3)
CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER))
CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1)
CPU_MINOR := $(shell echo $(CPU_VER) | cut -d '.' -f 2)
CPU_REV := $(shell echo $(CPU_VER) | cut -d '.' -f 3)
export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV
# Use cpu-related CONFIG_ vars to set compile options.
# The various CONFIG_XILINX cpu features options are integers 0/1/2...
# rather than bools y/n
# Work out HW multiplier support. This is icky.
# 1. Spartan2 has no HW multipliers.
@ -34,30 +36,29 @@ CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare
CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
# The various CONFIG_XILINX cpu features options are integers 0/1/2...
# rather than bools y/n
# r31 holds current when in kernel mode
CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
KBUILD_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
LDFLAGS :=
LDFLAGS_vmlinux :=
LDFLAGS_BLOB := --format binary --oformat elf32-microblaze
LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name)
LIBGCC := $(shell $(CC) $(KBUILD_KERNEL) -print-libgcc-file-name)
head-y := arch/microblaze/kernel/head.o
libs-y += arch/microblaze/lib/ $(LIBGCC)
core-y += arch/microblaze/kernel/ arch/microblaze/mm/ \
arch/microblaze/platform/
head-y := arch/microblaze/kernel/head.o
libs-y += arch/microblaze/lib/
libs-y += $(LIBGCC)
core-y += arch/microblaze/kernel/
core-y += arch/microblaze/mm/
core-y += arch/microblaze/platform/
boot := arch/$(ARCH)/boot
boot := arch/microblaze/boot
# defines filename extension depending on memory management type
ifeq ($(CONFIG_MMU),)
MMUEXT := -nommu
MMU := -nommu
endif
export MMUEXT
export MMU
all: linux.bin


@ -14,7 +14,6 @@
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/mm.h> /* Get struct page {...} */


@ -180,7 +180,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
__free_page(ptepage);
}
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
@ -193,7 +193,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
*/
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
/*#define pmd_free(mm, x) do { } while (0)*/
#define __pmd_free_tlb(tlb, x) do { } while (0)
#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
extern int do_check_pgt_cache(int, int);


@ -185,6 +185,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
@ -320,8 +321,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
/* FIXME */
static inline int pte_file(pte_t pte) { return 0; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@ -488,7 +488,7 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) })
#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
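
The fix matters because pte_file() used to hard-code 0 and
pgoff_to_pte() never set _PAGE_FILE, so a nonlinear file-mapping entry
could not be told apart from any other non-present PTE. A userspace
model of the corrected encode/decode, with the kernel types mocked:

#include <stdio.h>

#define _PAGE_FILE 0x001

typedef struct { unsigned long pte; } pte_t;	/* mocked */
#define pte_val(x)        ((x).pte)
#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })

static int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }

int main(void)
{
	pte_t pte = pgoff_to_pte(42UL);

	/* without _PAGE_FILE set, pte_file() would report 0 here */
	printf("file pte? %d, pgoff = %lu\n", pte_file(pte), pte_to_pgoff(pte));
	return 0;
}
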


@ -16,6 +16,18 @@
#define _ASM_MICROBLAZE_PROM_H
#ifdef __KERNEL__
/* Definitions used by the flattened device tree */
#define OF_DT_HEADER 0xd00dfeed /* marker */
#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
#define OF_DT_END_NODE 0x2 /* End node */
#define OF_DT_PROP 0x3 /* Property: name off, size, content */
#define OF_DT_NOP 0x4 /* nop */
#define OF_DT_END 0x9
#define OF_DT_VERSION 0x10
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/platform_device.h>
@ -29,16 +41,6 @@
#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
/* Definitions used by the flattened device tree */
#define OF_DT_HEADER 0xd00dfeed /* marker */
#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
#define OF_DT_END_NODE 0x2 /* End node */
#define OF_DT_PROP 0x3 /* Property: name off, size, content */
#define OF_DT_NOP 0x4 /* nop */
#define OF_DT_END 0x9
#define OF_DT_VERSION 0x10
/*
* This is what gets passed to the kernel by prom_init or kexec
*
@ -309,5 +311,6 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
*/
#include <linux/of.h>
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_PROM_H */
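
Hoisting the OF_DT_* constants above the __ASSEMBLY__ guard lets
assembly such as head.S (patched below) compare the first word of a
candidate blob against OF_DT_HEADER. A sketch of the same check in C;
MicroBlaze is big-endian so its comparison is direct, and the byteswap
here only covers experimenting on little-endian hosts:

#include <stdint.h>
#include <stdio.h>

#define OF_DT_HEADER 0xd00dfeed

static int looks_like_fdt(const void *blob)
{
	uint32_t magic = *(const uint32_t *)blob;

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	magic = __builtin_bswap32(magic);	/* blob is big-endian */
#endif
	return magic == OF_DT_HEADER;
}

int main(void)
{
	uint32_t word = OF_DT_HEADER;

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	word = __builtin_bswap32(word);		/* store as the blob would */
#endif
	printf("valid FDT header? %d\n", looks_like_fdt(&word));
	return 0;
}
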


@ -11,7 +11,7 @@
#ifndef _ASM_MICROBLAZE_TLB_H
#define _ASM_MICROBLAZE_TLB_H
#define tlb_flush(tlb) do {} while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>


@ -189,7 +189,7 @@ extern long strnlen_user(const char *src, long count);
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __gu_val = x; \
__typeof__(*(ptr)) volatile __gu_val = (x); \
long __gu_err = 0; \
switch (sizeof(__gu_val)) { \
case 1: \


@ -17,4 +17,4 @@ obj-$(CONFIG_HEART_BEAT) += heartbeat.o
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
obj-$(CONFIG_MMU) += misc.o
obj-y += entry$(MMUEXT).o
obj-y += entry$(MMU).o


@ -22,7 +22,7 @@
#define CI(c, p) { ci->c = PVR_##p(pvr); }
#define err_printk(x) \
early_printk("ERROR: Microblaze " x " - different for PVR and DTS\n");
early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
{


@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
#define err_printk(x) \
early_printk("ERROR: Microblaze " x "- different for kernel and DTS\n");
early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
{


@ -26,6 +26,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"7.10.b", 0x09},
{"7.10.c", 0x0a},
{"7.10.d", 0x0b},
{"7.20.a", 0x0c},
{"7.20.b", 0x0d},
/* FIXME There is no keycode defined in MBV for these versions */
{"2.10.a", 0x10},
{"3.00.a", 0x20},


@ -31,6 +31,7 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/prom.h> /* for OF_DT_HEADER */
#ifdef CONFIG_MMU
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
@ -54,11 +55,19 @@ ENTRY(_start)
andi r1, r1, ~2
mts rmsr, r1
/* save fdt to kernel location */
/* r7 stores pointer to fdt blob */
beqi r7, no_fdt_arg
/* r7 may point to an FDT, or there may be one linked in.
If it's in r7, we've got to save it away ASAP.
We ensure r7 points to a valid FDT, just in case the bootloader
is broken or non-existent */
beqi r7, no_fdt_arg /* NULL pointer? don't copy */
lw r11, r0, r7 /* Does r7 point to a */
rsubi r11, r11, OF_DT_HEADER /* valid FDT? */
beqi r11, _prepare_copy_fdt
or r7, r0, r0 /* clear R7 when not valid DTB */
bnei r11, no_fdt_arg /* No - get out of here */
_prepare_copy_fdt:
or r11, r0, r0 /* increment */
ori r4, r0, TOPHYS(_fdt_start) /* save bram context */
ori r4, r0, TOPHYS(_fdt_start)
ori r3, r0, (0x4000 - 4)
_copy_fdt:
lw r12, r7, r11 /* r12 = r7 + r11 */


@ -74,6 +74,7 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/asm-offsets.h>
/* Helpful Macros */
@ -428,19 +429,9 @@ handle_unaligned_ex:
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
_no_delayslot:
#endif
#ifdef CONFIG_MMU
/* Check if unaligned address is last on a 4k page */
andi r5, r4, 0xffc
xori r5, r5, 0xffc
bnei r5, _unaligned_ex2
_unaligned_ex1:
RESTORE_STATE;
/* Another page must be accessed or physical address not in page table */
bri unaligned_data_trap
_unaligned_ex2:
/* jump to high level unaligned handler */
RESTORE_STATE;
bri unaligned_data_trap
#endif
andi r6, r3, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
@ -450,45 +441,6 @@ _no_delayslot:
srl r6, r6;
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
#ifdef CONFIG_MMU
/* Get physical address */
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
ori r5, r0, CONFIG_KERNEL_START
cmpu r5, r4, r5
bgti r5, _unaligned_ex3
ori r5, r0, swapper_pg_dir
bri _unaligned_ex4
/* Get the PGD for the current thread. */
_unaligned_ex3: /* user thread */
addi r5 ,CURRENT_TASK, TOPHYS(0); /* get current task address */
lwi r5, r5, TASK_THREAD + PGDIR
_unaligned_ex4:
tophys(r5,r5)
BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */
andi r6, r6, 0xffc
/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
or r5, r5, r6
lwi r6, r5, 0 /* Get L1 entry */
andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */
beqi r5, _unaligned_ex1 /* Bail if no table */
tophys(r5,r5)
BSRLI(r6,r4,10) /* Compute PTE address */
andi r6, r6, 0xffc
andi r5, r5, 0xfffff003
or r5, r5, r6
lwi r5, r5, 0 /* Get Linux PTE */
andi r6, r5, _PAGE_PRESENT
beqi r6, _unaligned_ex1 /* Bail if no page */
andi r5, r5, 0xfffff000 /* Extract RPN */
andi r4, r4, 0x00000fff /* Extract offset */
or r4, r4, r5 /* Create physical address */
#endif /* CONFIG_MMU */
andi r6, r3, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
@ -959,15 +911,15 @@ _unaligned_data_exception:
andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
ex_lw_vm:
beqid r6, ex_lhw_vm;
lbui r5, r4, 0; /* Exception address in r4 - delay slot */
load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */
/* Load a word, byte-by-byte from destination address and save it in tmp space*/
la r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
lbui r5, r4, 1;
load2: lbui r5, r4, 1;
sbi r5, r6, 1;
lbui r5, r4, 2;
load3: lbui r5, r4, 2;
sbi r5, r6, 2;
lbui r5, r4, 3;
load4: lbui r5, r4, 3;
sbi r5, r6, 3;
brid ex_lw_tail_vm;
/* Get the destination register value into r3 - delay slot */
@ -977,7 +929,7 @@ ex_lhw_vm:
* save it in tmp space */
la r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
lbui r5, r4, 1;
load5: lbui r5, r4, 1;
sbi r5, r6, 1;
lhui r3, r6, 0; /* Get the destination register value into r3 */
ex_lw_tail_vm:
@ -996,22 +948,53 @@ ex_sw_tail_vm:
swi r3, r5, 0; /* Get the word - delay slot */
/* Store the word, byte-by-byte into destination address */
lbui r3, r5, 0;
sbi r3, r4, 0;
store1: sbi r3, r4, 0;
lbui r3, r5, 1;
sbi r3, r4, 1;
store2: sbi r3, r4, 1;
lbui r3, r5, 2;
sbi r3, r4, 2;
store3: sbi r3, r4, 2;
lbui r3, r5, 3;
brid ret_from_exc;
sbi r3, r4, 3; /* Delay slot */
store4: sbi r3, r4, 3; /* Delay slot */
ex_shw_vm:
/* Store the lower half-word, byte-by-byte into destination address */
lbui r3, r5, 2;
sbi r3, r4, 0;
store5: sbi r3, r4, 0;
lbui r3, r5, 3;
brid ret_from_exc;
sbi r3, r4, 1; /* Delay slot */
store6: sbi r3, r4, 1; /* Delay slot */
ex_sw_end_vm: /* Exception handling of store word, ends. */
/* We have to prevent the case where a get/put_user macro passes an
* unaligned pointer into a bad page area. We must find the original
* instruction that caused the fault and run the fixup for that
* instruction, not for an instruction inside the unaligned handler */
ex_unaligned_fixup:
ori r5, r7, 0 /* setup pointer to pt_regs */
lwi r6, r7, PT_PC; /* faulting address is one instruction above */
addik r6, r6, -4 /* for finding proper fixup */
swi r6, r7, PT_PC; /* and save it back to PT_PC */
addik r7, r0, SIGSEGV
/* call bad_page_fault to find the proper fixup; the fixup address is saved
* in PT_PC, which is used as the return address from the exception */
la r15, r0, ret_from_exc-8 /* setup return address */
brid bad_page_fault
nop
/* We cover every load/store above, since any of those accesses could fault */
.section __ex_table,"a";
.word load1,ex_unaligned_fixup;
.word load2,ex_unaligned_fixup;
.word load3,ex_unaligned_fixup;
.word load4,ex_unaligned_fixup;
.word load5,ex_unaligned_fixup;
.word store1,ex_unaligned_fixup;
.word store2,ex_unaligned_fixup;
.word store3,ex_unaligned_fixup;
.word store4,ex_unaligned_fixup;
.word store5,ex_unaligned_fixup;
.word store6,ex_unaligned_fixup;
.previous;
.end _unaligned_data_exception
#endif /* CONFIG_MMU */
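
The load1..store6 labels recorded in __ex_table above let the
page-fault handler redirect a faulting access to ex_unaligned_fixup
instead of oopsing. A userspace sketch of how such a table is consumed;
the real kernel sorts and binary-searches it, and all addresses here
are mocked:

#include <stdio.h>
#include <stddef.h>

struct exception_table_entry {
	unsigned long insn;	/* address of the faulting load/store */
	unsigned long fixup;	/* where execution resumes */
};

/* mocked table: both entries share one fixup, as in the asm above */
static const struct exception_table_entry ex_table[] = {
	{ 0x1000, 0x2000 },	/* load1  -> ex_unaligned_fixup */
	{ 0x1008, 0x2000 },	/* store1 -> ex_unaligned_fixup */
};

static unsigned long search_fixup(unsigned long pc)
{
	for (size_t i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].insn == pc)
			return ex_table[i].fixup;
	return 0;		/* no fixup: genuine kernel fault */
}

int main(void)
{
	printf("fixup for pc 0x1008: %#lx\n", search_fixup(0x1008));
	return 0;
}
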


@ -57,7 +57,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
unsigned long int *location;
unsigned long int locoffs;
unsigned long int value;
#if __GNUC__ < 4
unsigned long int old_value;
@ -113,10 +112,12 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
break;
case R_MICROBLAZE_64_PCREL:
locoffs = (location[0] & 0xFFFF) << 16 |
#if __GNUC__ < 4
old_value = (location[0] & 0xFFFF) << 16 |
(location[1] & 0xFFFF);
value -= (unsigned long int)(location) + 4 +
locoffs;
value -= old_value;
#endif
value -= (unsigned long int)(location) + 4;
location[0] = (location[0] & 0xFFFF0000) |
(value >> 16);
location[1] = (location[1] & 0xFFFF0000) |
@ -125,6 +126,14 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
value);
break;
case R_MICROBLAZE_32_PCREL_LO:
pr_debug("R_MICROBLAZE_32_PCREL_LO\n");
break;
case R_MICROBLAZE_64_NONE:
pr_debug("R_MICROBLAZE_NONE\n");
break;
case R_MICROBLAZE_NONE:
pr_debug("R_MICROBLAZE_NONE\n");
break;
@ -133,7 +142,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
printk(KERN_ERR "module %s: "
"Unknown relocation: %u\n",
module->name,
ELF32_R_TYPE(rela->r_info));
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
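
For reference, R_MICROBLAZE_64_PCREL spreads a 32-bit PC-relative value
across the low 16 bits of two consecutive instruction words (an
imm/addik pair), exactly as the code above patches it. A mocked,
self-contained version; the opcode bits and addresses are illustrative,
not taken from a real module:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* imm / addik pair; opcode bits are mocked, not real encodings */
	uint32_t location[2] = { 0xb0000000, 0x30600000 };
	unsigned long pc = 0x90000000;		/* mocked &location[0] */
	unsigned long target = 0x9000004c;	/* symbol + addend */

	unsigned long value = target - (pc + 4);

	location[0] = (location[0] & 0xFFFF0000) | (value >> 16);
	location[1] = (location[1] & 0xFFFF0000) | (value & 0xFFFF);

	printf("%08x %08x\n", location[0], location[1]);
	return 0;
}
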


@ -138,8 +138,12 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL);
#endif
early_printk("Ramdisk addr 0x%08x, FDT 0x%08x\n", ram, fdt);
printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt);
early_printk("Ramdisk addr 0x%08x, ", ram);
if (fdt)
early_printk("FDT at 0x%08x\n", fdt);
else
early_printk("Compiled-in FDT at 0x%08x\n",
(unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
early_printk("Found romfs @ 0x%08x (0x%08x)\n",


@ -33,105 +33,6 @@
#include <linux/unistd.h>
#include <asm/syscalls.h>
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
* This is really horribly ugly. This will be removed with a new toolchain.
*/
asmlinkage long
sys_ipc(uint call, int first, int second, int third, void *ptr, long fifth)
{
int version, ret;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
ret = -EINVAL;
switch (call) {
case SEMOP:
ret = sys_semop(first, (struct sembuf *)ptr, second);
break;
case SEMGET:
ret = sys_semget(first, second, third);
break;
case SEMCTL:
{
union semun fourth;
if (!ptr)
break;
ret = (access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
|| (get_user(fourth.__pad, (void **)ptr)) ;
if (ret)
break;
ret = sys_semctl(first, second, third, fourth);
break;
}
case MSGSND:
ret = sys_msgsnd(first, (struct msgbuf *) ptr, second, third);
break;
case MSGRCV:
switch (version) {
case 0: {
struct ipc_kludge tmp;
if (!ptr)
break;
ret = (access_ok(VERIFY_READ, ptr, sizeof(tmp))
? 0 : -EFAULT) || copy_from_user(&tmp,
(struct ipc_kludge *) ptr, sizeof(tmp));
if (ret)
break;
ret = sys_msgrcv(first, tmp.msgp, second, tmp.msgtyp,
third);
break;
}
default:
ret = sys_msgrcv(first, (struct msgbuf *) ptr,
second, fifth, third);
break;
}
break;
case MSGGET:
ret = sys_msgget((key_t) first, second);
break;
case MSGCTL:
ret = sys_msgctl(first, second, (struct msqid_ds *) ptr);
break;
case SHMAT:
switch (version) {
default: {
ulong raddr;
ret = access_ok(VERIFY_WRITE, (ulong *) third,
sizeof(ulong)) ? 0 : -EFAULT;
if (ret)
break;
ret = do_shmat(first, (char *) ptr, second, &raddr);
if (ret)
break;
ret = put_user(raddr, (ulong *) third);
break;
}
case 1: /* iBCS2 emulator entry point */
if (!segment_eq(get_fs(), get_ds()))
break;
ret = do_shmat(first, (char *) ptr, second,
(ulong *) third);
break;
}
break;
case SHMDT:
ret = sys_shmdt((char *)ptr);
break;
case SHMGET:
ret = sys_shmget(first, second, third);
break;
case SHMCTL:
ret = sys_shmctl(first, second, (struct shmid_ds *) ptr);
break;
}
return ret;
}
asmlinkage long microblaze_vfork(struct pt_regs *regs)
{


@ -121,7 +121,7 @@ ENTRY(sys_call_table)
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc
.long sys_ni_syscall /* old sys_ipc */
.long sys_fsync
.long sys_ni_syscall /* sys_sigreturn_wrapper */
.long sys_clone /* 120 */


@ -69,7 +69,7 @@ static int store_updates_sp(struct pt_regs *regs)
* It is called from do_page_fault above and from some of the procedures
* in traps.c.
*/
static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *fixup;
/* MS: no context */
@ -122,15 +122,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
#endif /* CONFIG_KGDB */
if (in_atomic() || mm == NULL) {
/* FIXME */
if (kernel_mode(regs)) {
printk(KERN_EMERG
"Page fault in kernel mode - Oooou!!! pid %d\n",
current->pid);
_exception(SIGSEGV, regs, code, address);
return;
}
if (in_atomic() || !mm) {
if (kernel_mode(regs))
goto bad_area_nosemaphore;
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "


@ -98,23 +98,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_pages(pte, PTE_ORDER);
}
#define __pte_free_tlb(tlb,pte) \
#define __pte_free_tlb(tlb,pte,address) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
#ifdef CONFIG_32BIT
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#endif
#ifdef CONFIG_64BIT
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
@ -132,7 +121,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_pages((unsigned long)pmd, PMD_ORDER);
}
#define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x)
#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
#endif


@ -51,6 +51,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
}
#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
#endif /* _ASM_PGALLOC_H */


@ -21,7 +21,7 @@ do { if (!(tlb)->fullmm) \
#include <asm-generic/tlb.h>
#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#endif


@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
*/
/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#define __pmd_free_tlb(tlb,x,a) do { } while (0)
/* #define pgd_populate(mm, pmd, pte) BUG() */
#ifndef CONFIG_BOOKE


@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf)
kmem_cache_free(pgtable_cache[cachenum], p);
}
#define __pmd_free_tlb(tlb, pmd) \
#define __pmd_free_tlb(tlb, pmd,addr) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud) \
#define __pud_free_tlb(tlb, pud, addr) \
pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */


@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
#ifdef CONFIG_SMP
#define __pte_free_tlb(tlb,ptepage) \
#define __pte_free_tlb(tlb,ptepage,address) \
do { \
pgtable_page_dtor(ptepage); \
pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#else
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte))
#endif


@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd);
pmd_free_tlb(tlb, pmd, start);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud);
pud_free_tlb(tlb, pud, start);
}
/*


@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
*/
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long address)
{
if (!tlb->fullmm) {
tlb->array[tlb->nr_ptes++] = pte;
@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
* as the pgd. pmd_free_tlb checks the asce_limit against 2GB
* to avoid the double free of the pmd in this case.
*/
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 31))
@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
* as the pgd. pud_free_tlb checks the asce_limit against 4TB
* to avoid the double free of the pud in this case.
*/
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 42))


@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void)
machine_flags |= MACHINE_FLAG_VM;
}
static void early_pgm_check_handler(void)
static __init void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
@ -222,7 +222,7 @@ static void early_pgm_check_handler(void)
S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
void setup_lowcore_early(void)
static noinline __init void setup_lowcore_early(void)
{
psw_t psw;


@ -687,13 +687,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area;
#else
if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
BUG();
#endif
set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable();
local_irq_enable();
#ifdef CONFIG_64BIT
if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
BUG();
#endif
for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);


@ -88,10 +88,17 @@ __kernel_clock_gettime:
llilh %r4,0x0100
sar %a4,%r4
lghi %r4,0
epsw %r5,0
sacf 512 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
sacf 0
sar %a4,%r2
tml %r5,0x4000
jo 11f
tml %r5,0x8000
jno 10f
sacf 256
j 11f
10: sacf 0
11: sar %a4,%r2
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */


@ -7,24 +7,36 @@
*
*/
#include <asm/system.h>
/*
* save CPU registers before creating a hibernation image and before
* restoring the memory state from it
*/
void save_processor_state(void)
{
/* implentation contained in the
* swsusp_arch_suspend function
/* swsusp_arch_suspend() actually saves all cpu register contents.
* Machine checks must be disabled since swsusp_arch_suspend() stores
* register contents to their lowcore save areas. That's the same
* place where register contents on machine checks would be saved.
* To avoid register corruption disable machine checks.
* We must also disable machine checks in the new psw mask for
* program checks, since swsusp_arch_suspend() may generate program
* checks. Disabling machine checks for all other new psw masks is
* just paranoia.
*/
local_mcck_disable();
/* Disable lowcore protection */
__ctl_clear_bit(0,28);
S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
}
/*
* restore the contents of CPU registers
*/
void restore_processor_state(void)
{
/* implentation contained in the
* swsusp_arch_resume function
*/
S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
/* Enable lowcore protection */
__ctl_set_bit(0,28);
local_mcck_enable();
}


@ -32,19 +32,14 @@ swsusp_arch_suspend:
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
/* Switch off lowcore protection */
stctg %c0,%c0,__SF_EMPTY(%r15)
ni __SF_EMPTY+4(%r15),0xef
lctlg %c0,%c0,__SF_EMPTY(%r15)
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
/* Setup base register for lowcore (absolute 0) */
llgf %r1,__SF_EMPTY(%r15)
/* Save prefix register contents for lowcore */
llgf %r4,__SF_EMPTY(%r15)
/* Get pointer to save area */
aghi %r1,0x1000
lghi %r1,0x1000
/* Store registers */
mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
@ -79,17 +74,15 @@ swsusp_arch_suspend:
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
/* Setup lowcore */
brasl %r14,setup_lowcore_early
lghi %r2,0
lghi %r3,2*PAGE_SIZE
lghi %r5,2*PAGE_SIZE
1: mvcle %r2,%r4,0
jo 1b
/* Save image */
brasl %r14,swsusp_save
/* Switch on lowcore protection */
stctg %c0,%c0,__SF_EMPTY(%r15)
oi __SF_EMPTY+4(%r15),0x10
lctlg %c0,%c0,__SF_EMPTY(%r15)
/* Restore prefix register and return */
lghi %r1,0x1000
spx 0x318(%r1)
@ -117,11 +110,6 @@ swsusp_arch_resume:
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
/* Switch off lowcore protection */
stctg %c0,%c0,__SF_EMPTY(%r15)
ni __SF_EMPTY+4(%r15),0xef
lctlg %c0,%c0,__SF_EMPTY(%r15)
/* Set prefix page to zero */
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
@ -175,7 +163,7 @@ swsusp_arch_resume:
/* Load old stack */
lg %r15,0x2f8(%r13)
/* Pointer to save arae */
/* Pointer to save area */
lghi %r13,0x1000
#ifdef CONFIG_SMP
@ -187,11 +175,6 @@ swsusp_arch_resume:
/* Restore prefix register */
spx 0x318(%r13)
/* Switch on lowcore protection */
stctg %c0,%c0,__SF_EMPTY(%r15)
oi __SF_EMPTY+4(%r15),0x10
lctlg %c0,%c0,__SF_EMPTY(%r15)
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04


@ -73,20 +73,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
quicklist_free_page(QUICK_PT, NULL, pte);
}
#define __pte_free_tlb(tlb,pte) \
#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
static inline void check_pgt_cache(void)
{
quicklist_trim(QUICK_PGD, NULL, 25, 16);


@ -91,9 +91,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
}
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp) pud_free((tlb)->mm, pudp)
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)


@ -44,8 +44,8 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
@ -62,7 +62,7 @@ BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#endif /* _SPARC_PGALLOC_H */


@ -100,9 +100,9 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
}
#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
#define tlb_migrate_finish(mm) do { } while (0)
#define tlb_start_vma(tlb, vma) do { } while (0)


@ -40,7 +40,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
#define __pte_free_tlb(tlb,pte) \
#define __pte_free_tlb(tlb,pte, address) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb),(pte)); \
@ -53,7 +53,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
#endif
#define check_pgt_cache() do { } while (0)


@ -116,11 +116,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define tlb_migrate_finish(mm) do {} while (0)


@ -46,7 +46,13 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
__free_page(pte);
}
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
unsigned long address)
{
___pte_free_tlb(tlb, pte);
}
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
@ -78,7 +84,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
___pmd_free_tlb(tlb, pmd);
}
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
@ -108,7 +120,14 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
free_page((unsigned long)pud);
}
extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
___pud_free_tlb(tlb, pud);
}
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* PAGETABLE_LEVELS > 2 */


@ -212,9 +212,9 @@ extern int __get_user_bad(void);
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
__put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "Zr")
__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
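
The switch from "ir" to "er" reflects that a 64-bit mov accepts only a
sign-extended 32-bit immediate: GCC's "e" constraint expresses exactly
that, while a plain "i" also matches a full 64-bit constant and yields
an instruction the assembler rejects. A small x86-64-only demonstration
of the constraint, not the kernel macro itself:

#include <stdio.h>

int main(void)
{
	long dst;

	/* "er" = register, or a 32-bit sign-extended immediate - the
	 * only immediate form movq takes. x86-64 only. */
	asm("movq %1, %0" : "=m" (dst) : "er" (0x12345678L));

	printf("%#lx\n", dst);
	return 0;
}
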


@ -88,11 +88,11 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
ret, "l", "k", "ir", 4);
return ret;
case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "ir", 8);
ret, "q", "", "er", 8);
return ret;
case 10:
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "ir", 10);
ret, "q", "", "er", 10);
if (unlikely(ret))
return ret;
asm("":::"memory");
@ -101,12 +101,12 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
return ret;
case 16:
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "ir", 16);
ret, "q", "", "er", 16);
if (unlikely(ret))
return ret;
asm("":::"memory");
__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
ret, "q", "", "ir", 8);
ret, "q", "", "er", 8);
return ret;
default:
return copy_user_generic((__force void *)dst, src, size);
@ -157,7 +157,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
ret, "q", "", "=r", 8);
if (likely(!ret))
__put_user_asm(tmp, (u64 __user *)dst,
ret, "q", "", "ir", 8);
ret, "q", "", "er", 8);
return ret;
}
default:


@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
/* check CPU config space for extended APIC ID */
if (c->x86 >= 0xf) {
if (cpu_has_apic && c->x86 >= 0xf) {
unsigned int val;
val = read_pci_config(0, 24, 0, 0x68);
if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))


@ -1692,17 +1692,15 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
const char *buf, size_t siz)
{
char *p;
int len;
strncpy(mce_helper, buf, sizeof(mce_helper));
mce_helper[sizeof(mce_helper)-1] = 0;
len = strlen(mce_helper);
p = strchr(mce_helper, '\n');
if (*p)
if (p)
*p = 0;
return len;
return strlen(mce_helper) + !!p;
}
static ssize_t set_ignore_ce(struct sys_device *s,


@ -187,7 +187,7 @@ static void __init apic_intr_init(void)
#ifdef CONFIG_X86_THERMAL_VECTOR
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_THRESHOLD
#ifdef CONFIG_X86_MCE_THRESHOLD
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC)


@ -347,7 +347,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
.flags = IRQF_DISABLED | IRQF_NOBALANCING,
.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
.name = "mfgpt-timer"
};


@ -249,6 +249,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
},
},
{ /* Handle problems with rebooting on CompuLab SBC-FITPC2 */
.callback = set_bios_reboot,
.ident = "CompuLab SBC-FITPC2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"),
DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
},
},
{ }
};


@ -672,6 +672,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
},
},
{
/*
* AMI BIOS with low memory corruption was found on Intel DG45ID board.
* It has a different DMI_BIOS_VENDOR = "Intel Corp.", so for now we will
* match only DMI_BOARD_NAME and see whether there are more bad products
* from this vendor.
*/
.callback = dmi_low_memory_corruption,
.ident = "AMI BIOS",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
},
},
#endif
{}
};


@ -112,11 +112,6 @@ SECTIONS
_sdata = .;
DATA_DATA
CONSTRUCTORS
#ifdef CONFIG_X86_64
/* End of data section */
_edata = .;
#endif
} :data
#ifdef CONFIG_X86_32
@ -156,10 +151,8 @@ SECTIONS
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
*(.data.read_mostly)
#ifdef CONFIG_X86_32
/* End of data section */
_edata = .;
#endif
}
#ifdef CONFIG_X86_64


@ -103,6 +103,7 @@ EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_prot);
void __init set_highmem_pages_init(void)
{


@ -25,7 +25,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
return pte;
}
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
pgtable_page_dtor(pte);
paravirt_release_pte(page_to_pfn(pte));
@ -33,14 +33,14 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
}
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
tlb_remove_page(tlb, virt_to_page(pmd));
}
#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
tlb_remove_page(tlb, virt_to_page(pud));


@ -79,8 +79,10 @@ static __init void bad_srat(void)
acpi_numa = -1;
for (i = 0; i < MAX_LOCAL_APIC; i++)
apicid_to_node[i] = NUMA_NO_NODE;
for (i = 0; i < MAX_NUMNODES; i++)
nodes_add[i].start = nodes[i].end = 0;
for (i = 0; i < MAX_NUMNODES; i++) {
nodes[i].start = nodes[i].end = 0;
nodes_add[i].start = nodes_add[i].end = 0;
}
remove_all_active_ranges();
}


@ -42,6 +42,6 @@
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
#endif /* _XTENSA_TLB_H */


@ -515,10 +515,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,


@ -596,9 +596,12 @@ static const struct ich_laptop ich_laptop[] = {
{ 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
{ 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
{ 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
{ 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */
{ 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
{ 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
{ 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */
{ 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
{ 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */
{ 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
{ 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
{ 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */


@ -1515,6 +1515,7 @@ static int ata_hpa_resize(struct ata_device *dev)
return rc;
}
dev->n_native_sectors = native_sectors;
/* nothing to do? */
if (native_sectors <= sectors || !ata_ignore_hpa) {
@ -4099,6 +4100,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
unsigned int readid_flags)
{
u64 n_sectors = dev->n_sectors;
u64 n_native_sectors = dev->n_native_sectors;
int rc;
if (!ata_dev_enabled(dev))
@ -4128,16 +4130,30 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
/* verify n_sectors hasn't changed */
if (dev->class == ATA_DEV_ATA && n_sectors &&
dev->n_sectors != n_sectors) {
ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch "
"%llu != %llu\n",
(unsigned long long)n_sectors,
(unsigned long long)dev->n_sectors);
/* restore original n_sectors */
dev->n_sectors = n_sectors;
rc = -ENODEV;
goto fail;
/*
* Something could have caused HPA to be unlocked
* involuntarily. If n_native_sectors hasn't changed
* and the new size matches it, keep the device.
*/
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors > n_sectors &&
dev->n_sectors == n_native_sectors) {
ata_dev_printk(dev, KERN_WARNING,
"new n_sectors matches native, probably "
"late HPA unlock, continuing\n");
/* keep using the old n_sectors */
dev->n_sectors = n_sectors;
} else {
/* restore original n_[native]_sectors and fail */
dev->n_native_sectors = n_native_sectors;
dev->n_sectors = n_sectors;
rc = -ENODEV;
goto fail;
}
}
return 0;


@ -2327,7 +2327,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
struct ata_port *ap = link->ap;
struct ata_link *slave = ap->slave_link;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_eh_context *sehc = &slave->eh_context;
struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
unsigned int *classes = ehc->classes;
unsigned int lflags = link->flags;
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);


@ -26,9 +26,7 @@
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <mach/at91sam9260_matrix.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91sam9260.h>
#include <mach/board.h>
#include <mach/gpio.h>
@ -44,65 +42,62 @@ struct at91_ide_info {
unsigned long mode;
unsigned int cs;
struct clk *mck;
void __iomem *ide_addr;
void __iomem *alt_addr;
};
const struct ata_timing initial_timing =
static const struct ata_timing initial_timing =
{XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
{
unsigned long mul;
/*
* cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] =
* x * (f / 1_000_000_000) =
* x * ((f * 65536) / 1_000_000_000) / 65536 =
* x * (((f / 10_000) * 65536) / 100_000) / 65536 =
*/
mul = (mck_hz / 10000) << 16;
mul /= 100000;
return (ns * mul + 65536) >> 16; /* rounding */
}
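
A quick userspace sanity check of the fixed-point conversion above; the
70 ns timing and 100 MHz master clock are arbitrary example values:

#include <stdio.h>

static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
{
	unsigned long mul = (mck_hz / 10000) << 16;

	mul /= 100000;
	return (ns * mul + 65536) >> 16;	/* rounding */
}

int main(void)
{
	/* 70 ns setup time at a 100 MHz master clock -> 7 cycles */
	printf("%lu\n", calc_mck_cycles(70, 100000000UL));
	return 0;
}
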
static void set_smc_mode(struct at91_ide_info *info)
{
at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
return;
}
static void set_smc_timing(struct device *dev,
struct at91_ide_info *info, const struct ata_timing *ata)
{
int read_cycle, write_cycle, active, recover;
int nrd_setup, nrd_pulse, nrd_recover;
int nwe_setup, nwe_pulse;
unsigned long read_cycle, write_cycle, active, recover;
unsigned long nrd_setup, nrd_pulse, nrd_recover;
unsigned long nwe_setup, nwe_pulse;
int ncs_write_setup, ncs_write_pulse;
int ncs_read_setup, ncs_read_pulse;
unsigned long ncs_write_setup, ncs_write_pulse;
unsigned long ncs_read_setup, ncs_read_pulse;
unsigned int mck_hz;
struct clk *mck;
unsigned long mck_hz;
read_cycle = ata->cyc8b;
nrd_setup = ata->setup;
nrd_pulse = ata->act8b;
nrd_recover = ata->rec8b;
mck = clk_get(NULL, "mck");
BUG_ON(IS_ERR(mck));
mck_hz = clk_get_rate(mck);
mck_hz = clk_get_rate(info->mck);
read_cycle = calc_mck_cycles(read_cycle, mck_hz);
nrd_setup = calc_mck_cycles(nrd_setup, mck_hz);
nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz);
nrd_recover = calc_mck_cycles(nrd_recover, mck_hz);
clk_put(mck);
active = nrd_setup + nrd_pulse;
recover = read_cycle - active;
@ -121,13 +116,13 @@ static void set_smc_timing(struct device *dev,
ncs_write_setup = ncs_read_setup;
ncs_write_pulse = ncs_read_pulse;
dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n",
dev_dbg(dev, "ATA timings: nrd_setup = %lu nrd_pulse = %lu nrd_cycle = %lu\n",
nrd_setup, nrd_pulse, read_cycle);
dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n",
dev_dbg(dev, "ATA timings: nwe_setup = %lu nwe_pulse = %lu nwe_cycle = %lu\n",
nwe_setup, nwe_pulse, write_cycle);
dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n",
dev_dbg(dev, "ATA timings: ncs_read_setup = %lu ncs_read_pulse = %lu\n",
ncs_read_setup, ncs_read_pulse);
dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n",
dev_dbg(dev, "ATA timings: ncs_write_setup = %lu ncs_write_pulse = %lu\n",
ncs_write_setup, ncs_write_pulse);
at91_sys_write(AT91_SMC_SETUP(info->cs),
@ -217,6 +212,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
struct resource *mem_res;
struct ata_host *host;
struct ata_port *ap;
int irq_flags = 0;
int irq = 0;
int ret;
@ -261,6 +257,13 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
return -ENOMEM;
}
info->mck = clk_get(NULL, "mck");
if (IS_ERR(info->mck)) {
dev_err(dev, "failed to get access to mck clock\n");
return -ENODEV;
}
info->cs = board->chipselect;
info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT |
@ -304,6 +307,7 @@ err_alt_ioremap:
devm_iounmap(dev, info->ide_addr);
err_ide_ioremap:
clk_put(info->mck);
kfree(info);
return ret;
@ -326,6 +330,7 @@ static int __devexit pata_at91_remove(struct platform_device *pdev)
devm_iounmap(dev, info->ide_addr);
devm_iounmap(dev, info->alt_addr);
clk_put(info->mck);
kfree(info);
return 0;

View File

@ -653,7 +653,8 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
ap = host->ports[i];
ocd = ap->dev->platform_data;
if (!ap || (ap->flags & ATA_FLAG_DISABLED))
if (ap->flags & ATA_FLAG_DISABLED)
continue;
ocd = ap->dev->platform_data;

View File

@ -411,6 +411,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),

View File

@ -2514,7 +2514,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
char *when = "idle";
ata_ehi_clear_desc(ehi);
if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
if (ap->flags & ATA_FLAG_DISABLED) {
when = "disabled";
} else if (edma_was_enabled) {
when = "EDMA enabled";

View File

@ -532,7 +532,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
struct ata_port *ap = host->ports[i];
u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
if (unlikely(ap->flags & ATA_FLAG_DISABLED))
continue;
/* turn off SATA_IRQ if not supported */

View File

@ -180,7 +180,6 @@ static ssize_t firmware_loading_store(struct device *dev,
goto err;
}
/* Pages will be freed by vfree() */
fw_priv->pages = NULL;
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
complete(&fw_priv->completion);

View File

@ -275,9 +275,9 @@ int sysdev_register(struct sys_device *sysdev)
drv->add(sysdev);
}
mutex_unlock(&sysdev_drivers_lock);
kobject_uevent(&sysdev->kobj, KOBJ_ADD);
}
kobject_uevent(&sysdev->kobj, KOBJ_ADD);
return error;
}

View File

@ -1583,6 +1583,7 @@ static int n_tty_open(struct tty_struct *tty)
static inline int input_available_p(struct tty_struct *tty, int amt)
{
tty_flush_to_ldisc(tty);
if (tty->icanon) {
if (tty->canon_data)
return 1;

View File

@ -461,6 +461,19 @@ static void flush_to_ldisc(struct work_struct *work)
tty_ldisc_deref(disc);
}
/**
* tty_flush_to_ldisc
* @tty: tty to push
*
* Push the terminal flip buffers to the line discipline.
*
* Must not be called from IRQ context.
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
flush_to_ldisc(&tty->buf.work.work);
}
/**
* tty_flip_buffer_push - terminal
* @tty: tty to push

View File

@ -970,7 +970,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
}
for (cs = 0; cs < pvt->num_dcsm; cs++) {
reg = K8_DCSB0 + (cs * 4);
reg = K8_DCSM0 + (cs * 4);
err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
&pvt->dcsm0[cs]);
if (unlikely(err))

View File

@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \
radeon_test.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o

View File

@ -31,6 +31,132 @@
#include "atom.h"
#include "atom-bits.h"
static void atombios_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
SET_CRTC_OVERSCAN_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
int a1, a2;
memset(&args, 0, sizeof(args));
args.usOverscanRight = 0;
args.usOverscanLeft = 0;
args.usOverscanBottom = 0;
args.usOverscanTop = 0;
args.ucCRTC = radeon_crtc->crtc_id;
switch (radeon_crtc->rmx_type) {
case RMX_CENTER:
args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
break;
case RMX_ASPECT:
a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
if (a1 > a2) {
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
} else if (a2 > a1) {
args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
break;
case RMX_FULL:
default:
args.usOverscanRight = 0;
args.usOverscanLeft = 0;
args.usOverscanBottom = 0;
args.usOverscanTop = 0;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
break;
}
}
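To make the RMX_ASPECT arithmetic concrete (assumed modes, not from the patch): scaling a 1280x1024 source onto a 1920x1080 panel gives a1 = 1024 * 1920 = 1966080 and a2 = 1080 * 1280 = 1382400; since a1 > a2, each side gets usOverscanLeft = usOverscanRight = (1920 - 1382400 / 1024) / 2 = (1920 - 1350) / 2 = 285 pixels, pillarboxing the image to preserve its 5:4 aspect ratio.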
static void atombios_scaler_setup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
ENABLE_SCALER_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
/* fixme - fill in enc_priv for atom dac */
enum radeon_tv_std tv_std = TV_STD_NTSC;
if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
return;
memset(&args, 0, sizeof(args));
args.ucScaler = radeon_crtc->crtc_id;
if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) {
switch (tv_std) {
case TV_STD_NTSC:
default:
args.ucTVStandard = ATOM_TV_NTSC;
break;
case TV_STD_PAL:
args.ucTVStandard = ATOM_TV_PAL;
break;
case TV_STD_PAL_M:
args.ucTVStandard = ATOM_TV_PALM;
break;
case TV_STD_PAL_60:
args.ucTVStandard = ATOM_TV_PAL60;
break;
case TV_STD_NTSC_J:
args.ucTVStandard = ATOM_TV_NTSCJ;
break;
case TV_STD_SCART_PAL:
args.ucTVStandard = ATOM_TV_PAL; /* ??? */
break;
case TV_STD_SECAM:
args.ucTVStandard = ATOM_TV_SECAM;
break;
case TV_STD_PAL_CN:
args.ucTVStandard = ATOM_TV_PALCN;
break;
}
args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
} else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) {
args.ucTVStandard = ATOM_TV_CV;
args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
} else {
switch (radeon_crtc->rmx_type) {
case RMX_FULL:
args.ucEnable = ATOM_SCALER_EXPANSION;
break;
case RMX_CENTER:
args.ucEnable = ATOM_SCALER_CENTER;
break;
case RMX_ASPECT:
args.ucEnable = ATOM_SCALER_EXPANSION;
break;
default:
if (ASIC_IS_AVIVO(rdev))
args.ucEnable = ATOM_SCALER_DISABLE;
else
args.ucEnable = ATOM_SCALER_CENTER;
break;
}
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
&& rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
atom_rv515_force_tv_scaler(rdev);
}
}
static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (ASIC_IS_AVIVO(rdev)) {
uint32_t ss_cntl;
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
RADEON_PLL_PREFER_CLOSEST_LOWER);
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_gem_object *obj;
struct drm_radeon_gem_object *obj_priv;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
if (!crtc->fb)
return -EINVAL;
@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
/* TODO tiling */
radeon_object_get_tiling_flags(obj->driver_private,
&tiling_flags, NULL);
if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= AVIVO_D1GRPH_TILED;
if (radeon_crtc->crtc_id == 0)
WREG32(AVIVO_D1VGA_CONTROL, 0);
else
@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_set_surface(crtc);
}
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
radeon_bandwidth_update(rdev);
return 0;
}
@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
}
@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
void radeon_init_disp_bw_avivo(struct drm_device *dev,
struct drm_display_mode *mode1,
uint32_t pixel_bytes1,
struct drm_display_mode *mode2,
uint32_t pixel_bytes2)
{
struct radeon_device *rdev = dev->dev_private;
fixed20_12 min_mem_eff;
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
fixed20_12 sclk_ff, mclk_ff;
uint32_t dc_lb_memory_split, temp;
min_mem_eff.full = rfixed_const_8(0);
if (rdev->disp_priority == 2) {
uint32_t mc_init_misc_lat_timer = 0;
if (rdev->family == CHIP_RV515)
mc_init_misc_lat_timer =
RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
else if (rdev->family == CHIP_RS690)
mc_init_misc_lat_timer =
RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
mc_init_misc_lat_timer &=
~(R300_MC_DISP1R_INIT_LAT_MASK <<
R300_MC_DISP1R_INIT_LAT_SHIFT);
mc_init_misc_lat_timer &=
~(R300_MC_DISP0R_INIT_LAT_MASK <<
R300_MC_DISP0R_INIT_LAT_SHIFT);
if (mode2)
mc_init_misc_lat_timer |=
(1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
if (mode1)
mc_init_misc_lat_timer |=
(1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
if (rdev->family == CHIP_RV515)
WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
mc_init_misc_lat_timer);
else if (rdev->family == CHIP_RS690)
WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
mc_init_misc_lat_timer);
}
/*
* determine if there is enough bw for current mode
*/
temp_ff.full = rfixed_const(100);
mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
temp_ff.full = rfixed_const(temp);
mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
temp_ff.full = rfixed_const(1000);
pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
pix_clk.full = rfixed_div(pix_clk, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes1);
peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
temp_ff.full = rfixed_const(1000);
pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes2);
peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
}
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR
("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
printk("peak disp bw %d, mem_bw %d\n",
rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
}
/*
* Line Buffer Setup
* There is a single line buffer shared by both display controllers.
* DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
* controllers. The partitioning can either be done manually or via one of four
* preset allocations specified in bits 1:0:
* 0 - line buffer is divided in half and shared between each display controller
* 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
* 2 - D1 gets the whole buffer
* 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
* Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual allocation mode.
* In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
* 14:4; D2 allocation follows D1.
*/
/* is auto or manual better ? */
dc_lb_memory_split =
RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
#if 1
/* auto */
if (mode1 && mode2) {
if (mode1->hdisplay > mode2->hdisplay) {
if (mode1->hdisplay > 2560)
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else if (mode2->hdisplay > mode1->hdisplay) {
if (mode2->hdisplay > 2560)
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else if (mode1) {
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
} else if (mode2) {
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
}
#else
/* manual */
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
dc_lb_memory_split &=
~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
if (mode1) {
dc_lb_memory_split |=
((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
<< AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
} else if (mode2) {
dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
}
#endif
WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
}
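Rough numbers for the check above (assumed, illustrative hardware): a 128-bit DDR interface at a 400 MHz memory clock yields mem_bw = 400 * 32 bytes = 12800 MByte/s, about 10240 MByte/s after the 0.8 efficiency factor, while a single 1920x1200@60 head at 32 bpp drains roughly 154 * 4 = 616 MByte/s - so the warning only fires for very wide or multi-head configurations on narrow memory.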

View File

@ -110,7 +110,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
return 0;
}
@ -173,8 +173,12 @@ void r100_mc_setup(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
}
/* Write VRAM size in case we are limiting it */
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
/* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
* if the aperture is 64MB but we have 32MB VRAM
* we report only 32MB VRAM but we have to set MC_FB_LOCATION
* to 64MB, otherwise the gpu accidentally dies */
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
WREG32(RADEON_MC_FB_LOCATION, tmp);
@ -215,7 +219,6 @@ int r100_mc_init(struct radeon_device *rdev)
r100_pci_gart_disable(rdev);
/* Setup GPU memory space */
rdev->mc.vram_location = 0xFFFFFFFFUL;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
@ -752,6 +755,102 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
return 0;
}
/**
* r100_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
* Userspace sends a special sequence for VLINE waits.
* PACKET0 - VLINE_START_END + value
* PACKET0 - WAIT_UNTIL + value
* RELOC (P3) - crtc_id in reloc.
*
* This function parses this and relocates the VLINE START END
* and WAIT UNTIL packets to the correct crtc.
* It also detects a switched off crtc and nulls out the
* wait in that case.
*/
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, waitreloc;
int crtc_id;
int r;
uint32_t header, h_idx, reg;
ib_chunk = &p->chunks[p->chunk_ib_idx];
/* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx);
if (r)
return r;
/* check it's a wait until and only 1 count */
if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
DRM_ERROR("vline wait had illegal wait until segment\n");
r = -EINVAL;
return r;
}
if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
DRM_ERROR("vline wait had illegal wait until\n");
r = -EINVAL;
return r;
}
/* jump over the NOP */
r = r100_cs_packet_parse(p, &p3reloc, p->idx);
if (r)
return r;
h_idx = p->idx - 2;
p->idx += waitreloc.count;
p->idx += p3reloc.count;
header = ib_chunk->kdata[h_idx];
crtc_id = ib_chunk->kdata[h_idx + 5];
reg = ib_chunk->kdata[h_idx] >> 2;
mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
r = -EINVAL;
goto out;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
if (!crtc->enabled) {
/* if the CRTC isn't enabled - we need to nop out the wait until */
ib_chunk->kdata[h_idx + 2] = PACKET2(0);
ib_chunk->kdata[h_idx + 3] = PACKET2(0);
} else if (crtc_id == 1) {
switch (reg) {
case AVIVO_D1MODE_VLINE_START_END:
header &= R300_CP_PACKET0_REG_MASK;
header |= AVIVO_D2MODE_VLINE_START_END >> 2;
break;
case RADEON_CRTC_GUI_TRIG_VLINE:
header &= R300_CP_PACKET0_REG_MASK;
header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
break;
default:
DRM_ERROR("unknown crtc reloc\n");
r = -EINVAL;
goto out;
}
ib_chunk->kdata[h_idx] = header;
ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
}
out:
mutex_unlock(&p->rdev->ddev->mode_config.mutex);
return r;
}
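Laid out word by word, the stream the parser expects looks like this (a sketch reconstructed from the h_idx offsets above; dw indices are relative to the VLINE header):

/* dw0: PACKET0(VLINE_START_END, 0)  header, retargeted for crtc 1
 * dw1: start/end scanline value
 * dw2: PACKET0(WAIT_UNTIL, 0)       nop'd out if the crtc is off
 * dw3: RADEON_WAIT_CRTC_VLINE
 * dw4: PACKET3(NOP, 0) relocation
 * dw5: crtc handle, resolved via drm_mode_object_find()
 */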
/**
* r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
* @parser: parser structure holding parsing context.
@ -814,6 +913,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
unsigned idx;
bool onereg;
int r;
u32 tile_flags = 0;
ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
@ -825,6 +925,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
switch (reg) {
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
break;
/* FIXME: only allow PACKET3 blit? easier to check for out of
* range access */
case RADEON_DST_PITCH_OFFSET:
@ -838,7 +947,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
tmp = ib_chunk->kdata[idx] & 0x003fffff;
tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
r100_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
}
tmp |= tile_flags;
ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
break;
case RADEON_RB3D_DEPTHOFFSET:
case RADEON_RB3D_COLOROFFSET:
@ -869,6 +991,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case R300_TX_OFFSET_0+52:
case R300_TX_OFFSET_0+56:
case R300_TX_OFFSET_0+60:
/* rn50 has no 3D engine so fail on any 3d setup */
if (ASIC_IS_RN50(p->rdev)) {
DRM_ERROR("attempt to use RN50 3D engine failed\n");
return -EINVAL;
}
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@ -878,6 +1005,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
}
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
break;
case R300_RB3D_COLORPITCH0:
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
break;
default:
/* FIXME: we don't want to allow any other packets */
break;
@ -1256,29 +1402,100 @@ static void r100_vram_get_type(struct radeon_device *rdev)
}
}
void r100_vram_info(struct radeon_device *rdev)
static u32 r100_get_accessible_vram(struct radeon_device *rdev)
{
r100_vram_get_type(rdev);
u32 aper_size;
u8 byte;
aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
/* Set HDP_APER_CNTL only on cards that are known not to be broken,
* that is, have the 2nd generation multifunction PCI interface
*/
if (rdev->family == CHIP_RV280 ||
rdev->family >= CHIP_RV350) {
WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
~RADEON_HDP_APER_CNTL);
DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
return aper_size * 2;
}
/* Older cards have all sorts of funny issues to deal with. First
* check if it's a multifunction card by reading the PCI config
* header type... Limit those to one aperture size
*/
pci_read_config_byte(rdev->pdev, 0xe, &byte);
if (byte & 0x80) {
DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
DRM_INFO("Limiting VRAM to one aperture\n");
return aper_size;
}
/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
* has set it up. We don't write this as it's broken on some ASICs but
* we expect the BIOS to have done the right thing (might be too optimistic...)
*/
if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
return aper_size * 2;
return aper_size;
}
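With an assumed 128 MB CONFIG_APER_SIZE for illustration: an RV350 or newer (generation 2 interface) reports 256 MB accessible, a generation 1 multifunction card is limited to 128 MB, and a single-function generation 1 card gets 256 MB only if the BIOS already set RADEON_HDP_APER_CNTL.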
void r100_vram_init_sizes(struct radeon_device *rdev)
{
u64 config_aper_size;
u32 accessible;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
/* read NB_TOM to get the amount of ram stolen for the GPU */
tom = RREG32(RADEON_NB_TOM);
rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
/* for IGPs we need to keep VRAM where it was put by the BIOS */
rdev->mc.vram_location = (tom & 0xffff) << 16;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
} else {
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
/* Some production boards of m6 will report 0
* if it's 8 MB
*/
if (rdev->mc.vram_size == 0) {
rdev->mc.vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
if (rdev->mc.real_vram_size == 0) {
rdev->mc.real_vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
}
/* let driver place VRAM */
rdev->mc.vram_location = 0xFFFFFFFFUL;
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
* Novell bug 204882, along with lots of Ubuntu ones */
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
/* work out accessible VRAM */
accessible = r100_get_accessible_vram(rdev);
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
if (accessible > rdev->mc.aper_size)
accessible = rdev->mc.aper_size;
if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
rdev->mc.mc_vram_size = rdev->mc.aper_size;
if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
void r100_vram_info(struct radeon_device *rdev)
{
r100_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
}
@ -1533,3 +1750,530 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev)
return 0;
#endif
}
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
{
int surf_index = reg * 16;
int flags = 0;
/* r100/r200 divide by 16 */
if (rdev->family < CHIP_R300)
flags = pitch / 16;
else
flags = pitch / 8;
if (rdev->family <= CHIP_RS200) {
if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
== (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
flags |= RADEON_SURF_TILE_COLOR_BOTH;
if (tiling_flags & RADEON_TILING_MACRO)
flags |= RADEON_SURF_TILE_COLOR_MACRO;
} else if (rdev->family <= CHIP_RV280) {
if (tiling_flags & (RADEON_TILING_MACRO))
flags |= R200_SURF_TILE_COLOR_MACRO;
if (tiling_flags & RADEON_TILING_MICRO)
flags |= R200_SURF_TILE_COLOR_MICRO;
} else {
if (tiling_flags & RADEON_TILING_MACRO)
flags |= R300_SURF_TILE_MACRO;
if (tiling_flags & RADEON_TILING_MICRO)
flags |= R300_SURF_TILE_MICRO;
}
DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
return 0;
}
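A hypothetical call to make the encoding concrete (values assumed): on an R100-class part, r100_set_surface_reg(rdev, 0, RADEON_TILING_MACRO, 4096, 0, 1024 * 1024) programs surface 0 with flags = 4096 / 16 = 256 ORed with RADEON_SURF_TILE_COLOR_MACRO, a lower bound of 0 and an upper bound of 0xFFFFF.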
void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
{
int surf_index = reg * 16;
WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
}
void r100_bandwidth_update(struct radeon_device *rdev)
{
fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
fixed20_12 memtcas_ff[8] = {
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(0),
fixed_init_half(1),
fixed_init_half(2),
fixed_init(0),
};
fixed20_12 memtcas_rs480_ff[8] = {
fixed_init(0),
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(0),
fixed_init_half(1),
fixed_init_half(2),
fixed_init_half(3),
};
fixed20_12 memtcas2_ff[8] = {
fixed_init(0),
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(4),
fixed_init(5),
fixed_init(6),
fixed_init(7),
};
fixed20_12 memtrbs[8] = {
fixed_init(1),
fixed_init_half(1),
fixed_init(2),
fixed_init_half(2),
fixed_init(3),
fixed_init_half(3),
fixed_init(4),
fixed_init_half(4)
};
fixed20_12 memtrbs_r4xx[8] = {
fixed_init(4),
fixed_init(5),
fixed_init(6),
fixed_init(7),
fixed_init(8),
fixed_init(9),
fixed_init(10),
fixed_init(11)
};
fixed20_12 min_mem_eff;
fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
fixed20_12 cur_latency_mclk, cur_latency_sclk;
fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
disp_drain_rate2, read_return_rate;
fixed20_12 time_disp1_drop_priority;
int c;
int cur_size = 16; /* in octawords */
int critical_point = 0, critical_point2;
/* uint32_t read_return_rate, time_disp1_drop_priority; */
int stop_req, max_stop_req;
struct drm_display_mode *mode1 = NULL;
struct drm_display_mode *mode2 = NULL;
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
if (rdev->mode_info.crtcs[0]->base.enabled) {
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
}
if (rdev->mode_info.crtcs[1]->base.enabled) {
mode2 = &rdev->mode_info.crtcs[1]->base.mode;
pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
}
min_mem_eff.full = rfixed_const_8(0);
/* get modes */
if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
/* check crtc enables */
if (mode2)
mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
if (mode1)
mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
}
/*
* determine if there is enough bw for current mode
*/
mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
temp_ff.full = rfixed_const(100);
mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
temp_ff.full = rfixed_const(temp);
mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
temp_ff.full = rfixed_const(1000);
pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
pix_clk.full = rfixed_div(pix_clk, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes1);
peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
temp_ff.full = rfixed_const(1000);
pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes2);
peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
}
mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
}
/* Get values from the EXT_MEM_CNTL register...converting its contents. */
temp = RREG32(RADEON_MEM_TIMING_CNTL);
if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
mem_trcd = ((temp >> 2) & 0x3) + 1;
mem_trp = ((temp & 0x3)) + 1;
mem_tras = ((temp & 0x70) >> 4) + 1;
} else if (rdev->family == CHIP_R300 ||
rdev->family == CHIP_R350) { /* r300, r350 */
mem_trcd = (temp & 0x7) + 1;
mem_trp = ((temp >> 8) & 0x7) + 1;
mem_tras = ((temp >> 11) & 0xf) + 4;
} else if (rdev->family == CHIP_RV350 ||
rdev->family <= CHIP_RV380) {
/* rv3x0 */
mem_trcd = (temp & 0x7) + 3;
mem_trp = ((temp >> 8) & 0x7) + 3;
mem_tras = ((temp >> 11) & 0xf) + 6;
} else if (rdev->family == CHIP_R420 ||
rdev->family == CHIP_R423 ||
rdev->family == CHIP_RV410) {
/* r4xx */
mem_trcd = (temp & 0xf) + 3;
if (mem_trcd > 15)
mem_trcd = 15;
mem_trp = ((temp >> 8) & 0xf) + 3;
if (mem_trp > 15)
mem_trp = 15;
mem_tras = ((temp >> 12) & 0x1f) + 6;
if (mem_tras > 31)
mem_tras = 31;
} else { /* RV200, R200 */
mem_trcd = (temp & 0x7) + 1;
mem_trp = ((temp >> 8) & 0x7) + 1;
mem_tras = ((temp >> 12) & 0xf) + 4;
}
/* convert to FF */
trcd_ff.full = rfixed_const(mem_trcd);
trp_ff.full = rfixed_const(mem_trp);
tras_ff.full = rfixed_const(mem_tras);
/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
data = (temp & (7 << 20)) >> 20;
if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
if (rdev->family == CHIP_RS480) /* don't think rs400 */
tcas_ff = memtcas_rs480_ff[data];
else
tcas_ff = memtcas_ff[data];
} else
tcas_ff = memtcas2_ff[data];
if (rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) {
/* extra cas latency stored in bits 23-25 0-4 clocks */
data = (temp >> 23) & 0x7;
if (data < 5)
tcas_ff.full += rfixed_const(data);
}
if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
/* on the R300, Tcas is included in Trbs.
*/
temp = RREG32(RADEON_MEM_CNTL);
data = (R300_MEM_NUM_CHANNELS_MASK & temp);
if (data == 1) {
if (R300_MEM_USE_CD_CH_ONLY & temp) {
temp = RREG32(R300_MC_IND_INDEX);
temp &= ~R300_MC_IND_ADDR_MASK;
temp |= R300_MC_READ_CNTL_CD_mcind;
WREG32(R300_MC_IND_INDEX, temp);
temp = RREG32(R300_MC_IND_DATA);
data = (R300_MEM_RBS_POSITION_C_MASK & temp);
} else {
temp = RREG32(R300_MC_READ_CNTL_AB);
data = (R300_MEM_RBS_POSITION_A_MASK & temp);
}
} else {
temp = RREG32(R300_MC_READ_CNTL_AB);
data = (R300_MEM_RBS_POSITION_A_MASK & temp);
}
if (rdev->family == CHIP_RV410 ||
rdev->family == CHIP_R420 ||
rdev->family == CHIP_R423)
trbs_ff = memtrbs_r4xx[data];
else
trbs_ff = memtrbs[data];
tcas_ff.full += trbs_ff.full;
}
sclk_eff_ff.full = sclk_ff.full;
if (rdev->flags & RADEON_IS_AGP) {
fixed20_12 agpmode_ff;
agpmode_ff.full = rfixed_const(radeon_agpmode);
temp_ff.full = rfixed_const_666(16);
sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
}
/* TODO PCIE lanes may affect this - agpmode == 16?? */
if (ASIC_IS_R300(rdev)) {
sclk_delay_ff.full = rfixed_const(250);
} else {
if ((rdev->family == CHIP_RV100) ||
rdev->flags & RADEON_IS_IGP) {
if (rdev->mc.vram_is_ddr)
sclk_delay_ff.full = rfixed_const(41);
else
sclk_delay_ff.full = rfixed_const(33);
} else {
if (rdev->mc.vram_width == 128)
sclk_delay_ff.full = rfixed_const(57);
else
sclk_delay_ff.full = rfixed_const(41);
}
}
mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
if (rdev->mc.vram_is_ddr) {
if (rdev->mc.vram_width == 32) {
k1.full = rfixed_const(40);
c = 3;
} else {
k1.full = rfixed_const(20);
c = 1;
}
} else {
k1.full = rfixed_const(40);
c = 3;
}
temp_ff.full = rfixed_const(2);
mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
temp_ff.full = rfixed_const(c);
mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
temp_ff.full = rfixed_const(4);
mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
mc_latency_mclk.full += k1.full;
mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
/*
HW cursor time assuming worst case of full size colour cursor.
*/
temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full += trcd_ff.full;
if (temp_ff.full < tras_ff.full)
temp_ff.full = tras_ff.full;
cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
temp_ff.full = rfixed_const(cur_size);
cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
/*
Find the total latency for the display data.
*/
disp_latency_overhead.full = rfixed_const(80);
disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
if (mc_latency_mclk.full > mc_latency_sclk.full)
disp_latency.full = mc_latency_mclk.full;
else
disp_latency.full = mc_latency_sclk.full;
/* setup Max GRPH_STOP_REQ default value */
if (ASIC_IS_RV100(rdev))
max_stop_req = 0x5c;
else
max_stop_req = 0x7c;
if (mode1) {
/* CRTC1
Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
*/
stop_req = mode1->hdisplay * pixel_bytes1 / 16;
if (stop_req > max_stop_req)
stop_req = max_stop_req;
/*
Find the drain rate of the display buffer.
*/
temp_ff.full = rfixed_const((16/pixel_bytes1));
disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
/*
Find the critical point of the display buffer.
*/
crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
crit_point_ff.full += rfixed_const_half(0);
critical_point = rfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point = 0;
}
/*
The critical point should never be above max_stop_req-4. Setting
GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
*/
if (max_stop_req - critical_point < 4)
critical_point = 0;
if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
critical_point = 0x10;
}
temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
temp &= ~(RADEON_GRPH_START_REQ_MASK);
if ((rdev->family == CHIP_R350) &&
(stop_req > 0x15)) {
stop_req -= 0x10;
}
temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
temp |= RADEON_GRPH_BUFFER_SIZE;
temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
RADEON_GRPH_CRITICAL_AT_SOF |
RADEON_GRPH_STOP_CNTL);
/*
Write the result into the register.
*/
WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
(critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
#if 0
if ((rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480)) {
/* attempt to program RS400 disp regs correctly ??? */
temp = RREG32(RS400_DISP1_REG_CNTL);
temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
RS400_DISP1_STOP_REQ_LEVEL_MASK);
WREG32(RS400_DISP1_REQ_CNTL1, (temp |
(critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
(critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
temp = RREG32(RS400_DMIF_MEM_CNTL1);
temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
RS400_DISP1_CRITICAL_POINT_STOP_MASK);
WREG32(RS400_DMIF_MEM_CNTL1, (temp |
(critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
(critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
}
#endif
DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
/* (unsigned int)info->SavedReg->grph_buffer_cntl, */
(unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
}
if (mode2) {
u32 grph2_cntl;
stop_req = mode2->hdisplay * pixel_bytes2 / 16;
if (stop_req > max_stop_req)
stop_req = max_stop_req;
/*
Find the drain rate of the display buffer.
*/
temp_ff.full = rfixed_const((16/pixel_bytes2));
disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
if ((rdev->family == CHIP_R350) &&
(stop_req > 0x15)) {
stop_req -= 0x10;
}
grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
RADEON_GRPH_CRITICAL_AT_SOF |
RADEON_GRPH_STOP_CNTL);
if ((rdev->family == CHIP_RS100) ||
(rdev->family == CHIP_RS200))
critical_point2 = 0;
else {
temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
temp_ff.full = rfixed_const(temp);
temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
if (sclk_ff.full < temp_ff.full)
temp_ff.full = sclk_ff.full;
read_return_rate.full = temp_ff.full;
if (mode1) {
temp_ff.full = read_return_rate.full - disp_drain_rate.full;
time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
} else {
time_disp1_drop_priority.full = 0;
}
crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
crit_point_ff.full += rfixed_const_half(0);
critical_point2 = rfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point2 = 0;
}
if (max_stop_req - critical_point2 < 4)
critical_point2 = 0;
}
if (critical_point2 == 0 && rdev->family == CHIP_R300) {
/* some R300 cards have problem with this set to 0 */
critical_point2 = 0x10;
}
WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
(critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
if ((rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480)) {
#if 0
/* attempt to program RS400 disp2 regs correctly ??? */
temp = RREG32(RS400_DISP2_REQ_CNTL1);
temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
RS400_DISP2_STOP_REQ_LEVEL_MASK);
WREG32(RS400_DISP2_REQ_CNTL1, (temp |
(critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
(critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
temp = RREG32(RS400_DISP2_REQ_CNTL2);
temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
RS400_DISP2_CRITICAL_POINT_STOP_MASK);
WREG32(RS400_DISP2_REQ_CNTL2, (temp |
(critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
(critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
#endif
WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
}
DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
}
}
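Plugging assumed numbers into the GRPH_STOP_REQ formula above: a 1024-pixel-wide mode at 32 bpp gives stop_req = 1024 * 4 / 16 = 256, which exceeds max_stop_req (0x7c = 124 on non-RV100 parts) and is clamped to it; the computed critical point is then only kept if it sits at least 4 below that clamp, otherwise it is forced to 0, which leaves display requests at high priority all the time.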

View File

@ -30,6 +30,8 @@
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "radeon_share.h"
/* r300,r350,rv350,rv370,rv380 depends on : */
void r100_hdp_reset(struct radeon_device *rdev);
@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24) |
0xc;
/* on x86 we want this to be CPU endian; on powerpc
* without HW swappers, it'll get swapped on the way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
writel(addr, ((void __iomem *)ptr) + (i * 4));
return 0;
}
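For instance (illustrative address): a page at bus address 0x123456000 encodes as (0x23456000 >> 8) | ((0x1 & 0xff) << 24) | 0xc = 0x00234560 | 0x01000000 | 0xc = 0x0123456c - the low 32 bits of the address land in bits 0-23, the high byte in bits 24-31, and the low nibble carries the 0xc flag bits.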
@ -579,10 +587,8 @@ void r300_vram_info(struct radeon_device *rdev)
} else {
rdev->mc.vram_width = 64;
}
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
r100_vram_init_sizes(rdev);
}
@ -970,7 +976,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track)
static const unsigned r300_reg_safe_bm[159] = {
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@ -1019,7 +1025,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_reloc *reloc;
struct r300_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
uint32_t tmp, tile_flags = 0;
unsigned i;
int r;
@ -1027,6 +1033,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
ib_chunk = &p->chunks[p->chunk_ib_idx];
track = (struct r300_cs_track*)p->track;
switch(reg) {
case AVIVO_D1MODE_VLINE_START_END:
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
break;
case RADEON_DST_PITCH_OFFSET:
case RADEON_SRC_PITCH_OFFSET:
r = r100_cs_packet_next_reloc(p, &reloc);
@ -1038,7 +1054,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
tmp = ib_chunk->kdata[idx] & 0x003fffff;
tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
r100_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
}
tmp |= tile_flags;
ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
break;
case R300_RB3D_COLOROFFSET0:
case R300_RB3D_COLOROFFSET1:
@ -1127,6 +1155,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
@ -1182,6 +1227,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
break;
case 0x4104:

View File

@ -27,7 +27,9 @@
#ifndef _R300_REG_H_
#define _R300_REG_H_
#define R300_SURF_TILE_MACRO (1<<16)
#define R300_SURF_TILE_MICRO (2<<16)
#define R300_SURF_TILE_BOTH (3<<16)
#define R300_MC_INIT_MISC_LAT_TIMER 0x180

View File

@ -445,6 +445,7 @@
#define AVIVO_D1MODE_DATA_FORMAT 0x6528
# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
#define AVIVO_D1MODE_VLINE_START_END 0x6538
#define AVIVO_D1MODE_VIEWPORT_START 0x6580
#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
@ -496,6 +497,7 @@
#define AVIVO_D2CUR_SIZE 0x6c10
#define AVIVO_D2CUR_POSITION 0x6c14
#define AVIVO_D2MODE_VLINE_START_END 0x6d38
#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88

View File

@ -28,6 +28,7 @@
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_share.h"
/* r520,rv530,rv560,rv570,r580 depends on : */
void r100_hdp_reset(struct radeon_device *rdev);
@ -94,8 +95,8 @@ int r520_mc_init(struct radeon_device *rdev)
"programming pipes. Bad things might happen.\n");
}
/* Write VRAM size in case we are limiting it */
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
WREG32_MC(R520_MC_FB_LOCATION, tmp);
@ -226,9 +227,20 @@ static void r520_vram_get_type(struct radeon_device *rdev)
void r520_vram_info(struct radeon_device *rdev)
{
r520_vram_get_type(rdev);
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
fixed20_12 a;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
r520_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
}
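To unpack the fixed-point setup above (assuming the usual 10 kHz units for default_sclk, so a 400 MHz engine clock arrives as 40000): dividing rfixed_const(40000) by rfixed_const(100) leaves rdev->pm.sclk holding 400.0 in 20.12 fixed point, matching the "GPU clock Mhz" convention documented on struct radeon_pm.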
void r520_bandwidth_update(struct radeon_device *rdev)
{
rv515_bandwidth_avivo_update(rdev);
}

View File

@ -67,7 +67,7 @@ int r600_mc_init(struct radeon_device *rdev)
"programming pipes. Bad things might happen.\n");
}
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
WREG32(R600_MC_VM_FB_LOCATION, tmp);
@ -140,7 +140,8 @@ void r600_vram_get_type(struct radeon_device *rdev)
void r600_vram_info(struct radeon_device *rdev)
{
r600_vram_get_type(rdev);
rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
/* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);

View File

@ -64,6 +64,7 @@ extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
/*
@ -113,6 +114,7 @@ enum radeon_family {
CHIP_RV770,
CHIP_RV730,
CHIP_RV710,
CHIP_RS880,
CHIP_LAST,
};
@ -201,6 +203,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
/*
* Tiling registers
*/
struct radeon_surface_reg {
struct radeon_object *robj;
};
#define RADEON_GEM_MAX_SURFACES 8
/*
* Radeon buffer.
@ -213,6 +223,7 @@ struct radeon_object_list {
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
uint32_t tiling_flags;
};
int radeon_object_init(struct radeon_device *rdev);
@ -242,8 +253,15 @@ void radeon_object_list_clean(struct list_head *head);
int radeon_object_fbdev_mmap(struct radeon_object *robj,
struct vm_area_struct *vma);
unsigned long radeon_object_size(struct radeon_object *robj);
void radeon_object_clear_surface_reg(struct radeon_object *robj);
int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
bool force_drop);
void radeon_object_set_tiling_flags(struct radeon_object *robj,
uint32_t tiling_flags, uint32_t pitch);
void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
/*
* GEM objects.
*/
@ -315,8 +333,11 @@ struct radeon_mc {
unsigned gtt_location;
unsigned gtt_size;
unsigned vram_location;
unsigned vram_size;
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
unsigned mc_vram_size;
unsigned vram_width;
unsigned real_vram_size;
int vram_mtrr;
bool vram_is_ddr;
};
@ -474,6 +495,39 @@ struct radeon_wb {
uint64_t gpu_addr;
};
/**
* struct radeon_pm - power management data
* @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
* @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880)
* @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880)
* @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880)
* @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
* @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
* @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
* @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
* @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
* @sclk: GPU clock Mhz (core bandwidth depends on this clock)
* @needed_bandwidth: current bandwidth needs
*
* It keeps track of various data needed to make power management decisions.
* Bandwidth need is used to determine the minimum clock of the GPU and memory.
* The equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
struct radeon_pm {
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
fixed20_12 igp_ht_link_clk;
fixed20_12 igp_ht_link_width;
fixed20_12 k8_bandwidth;
fixed20_12 sideport_bandwidth;
fixed20_12 ht_bandwidth;
fixed20_12 core_bandwidth;
fixed20_12 sclk;
fixed20_12 needed_bandwidth;
};
/*
* Benchmarking
@ -481,6 +535,12 @@ struct radeon_wb {
void radeon_benchmark(struct radeon_device *rdev);
/*
* Testing
*/
void radeon_test_moves(struct radeon_device *rdev);
/*
* Debugfs
*/
@ -535,6 +595,11 @@ struct radeon_asic {
void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
};
union radeon_asic_config {
@ -566,6 +631,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
/*
@ -594,8 +663,8 @@ struct radeon_device {
struct radeon_object *fbdev_robj;
struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
unsigned long rmmio_base;
unsigned long rmmio_size;
resource_size_t rmmio_base;
resource_size_t rmmio_size;
void *rmmio;
radeon_rreg_t mm_rreg;
radeon_wreg_t mm_wreg;
@ -619,11 +688,14 @@ struct radeon_device {
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
struct radeon_pm pm;
struct mutex cs_mutex;
struct radeon_wb wb;
bool gpu_lockup;
bool shutdown;
bool suspend;
bool need_dma32;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
};
int radeon_device_init(struct radeon_device *rdev,
@ -670,6 +742,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
/*
* ASICs helpers.
*/
#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
(rdev->pdev->device == 0x5969))
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
(rdev->family == CHIP_RV200) || \
(rdev->family == CHIP_RS100) || \
@ -796,5 +870,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
#endif

View File

@ -71,6 +71,11 @@ int r100_copy_blit(struct radeon_device *rdev,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic r100_asic = {
.init = &r100_init,
@ -100,6 +105,9 @@ static struct radeon_asic r100_asic = {
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
};
@ -128,6 +136,7 @@ int r300_copy_dma(struct radeon_device *rdev,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
static struct radeon_asic r300_asic = {
.init = &r300_init,
.errata = &r300_errata,
@ -156,6 +165,9 @@ static struct radeon_asic r300_asic = {
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
};
/*
@ -193,6 +205,9 @@ static struct radeon_asic r420_asic = {
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
};
@ -237,6 +252,9 @@ static struct radeon_asic rs400_asic = {
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
};
@ -254,6 +272,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs600_asic = {
.init = &r300_init,
.errata = &rs600_errata,
@ -282,6 +301,7 @@ static struct radeon_asic rs600_asic = {
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update,
};
@ -294,6 +314,7 @@ int rs690_mc_init(struct radeon_device *rdev);
void rs690_mc_fini(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs690_asic = {
.init = &r300_init,
.errata = &rs690_errata,
@ -322,6 +343,9 @@ static struct radeon_asic rs690_asic = {
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
};
@ -339,6 +363,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rv515_asic = {
.init = &rv515_init,
.errata = &rv515_errata,
@ -367,6 +392,9 @@ static struct radeon_asic rv515_asic = {
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
};
@ -377,6 +405,7 @@ void r520_errata(struct radeon_device *rdev);
void r520_vram_info(struct radeon_device *rdev);
int r520_mc_init(struct radeon_device *rdev);
void r520_mc_fini(struct radeon_device *rdev);
void r520_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic r520_asic = {
.init = &rv515_init,
.errata = &r520_errata,
@ -405,6 +434,9 @@ static struct radeon_asic r520_asic = {
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r520_bandwidth_update,
};
/*


@ -103,7 +103,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint32_t supported_device,
int *connector_type,
struct radeon_i2c_bus_rec *i2c_bus)
struct radeon_i2c_bus_rec *i2c_bus,
uint8_t *line_mux)
{
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@ -127,8 +128,10 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
if ((dev->pdev->device == 0x5653) &&
(dev->pdev->subsystem_vendor == 0x1462) &&
(dev->pdev->subsystem_device == 0x0291)) {
if (*connector_type == DRM_MODE_CONNECTOR_LVDS)
if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
i2c_bus->valid = false;
*line_mux = 53;
}
}
/* Funky macbooks */
@ -526,7 +529,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (!radeon_atom_apply_quirks
(dev, (1 << i), &bios_connectors[i].connector_type,
&bios_connectors[i].ddc_bus))
&bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
continue;
bios_connectors[i].valid = true;
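
The quirk path above keys on PCI device and subsystem IDs and patches the connector record in place, now including a line_mux override. A rough standalone sketch of that match-and-override shape (hypothetical struct and a stand-in constant for the connector type):

#include <stdbool.h>
#include <stdint.h>

struct pci_ids {
        uint16_t device, subsystem_vendor, subsystem_device;
};

static bool apply_quirk(const struct pci_ids *id, int *connector_type,
                        bool *i2c_valid, uint8_t *line_mux)
{
        /* LVDS entry has no usable DDC line and needs an explicit
         * line_mux so the connector record stays unique. */
        if (id->device == 0x5653 &&
            id->subsystem_vendor == 0x1462 &&
            id->subsystem_device == 0x0291 &&
            *connector_type == 0 /* stand-in for DRM_MODE_CONNECTOR_LVDS */) {
                *i2c_valid = false;
                *line_mux = 53;
        }
        return true;    /* false would mean: skip this connector entirely */
}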


@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence);
r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence);
if (r) {
goto out_cleanup;
}
@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence);
r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence);
if (r) {
goto out_cleanup;
}
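
The copy helpers take a length in pages, and `size >> 14` divided by 16384 rather than 4096, so the old code benchmarked only a quarter of the intended data. A quick standalone check of the conversion, assuming 4 KiB GPU pages as the driver does here:

#include <assert.h>

int main(void)
{
        unsigned bsize = 1024 * 1024;           /* 1 MiB test buffer */

        assert(bsize / 4096 == 256);            /* correct page count */
        assert((bsize >> 14) == 64);            /* old code: 4x too few */
        return 0;
}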


@ -127,17 +127,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
sizeof(struct drm_radeon_cs_chunk))) {
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].kdata = NULL;
p->chunks[i].chunk_id = user_chunk.chunk_id;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
p->chunk_ib_idx = i;
/* zero length IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
p->chunks[i].length_dw = user_chunk.length_dw;
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
p->chunks[i].kdata = NULL;
size = p->chunks[i].length_dw * sizeof(uint32_t);
p->chunks[i].kdata = kzalloc(size, GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
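
Each chunk is now validated before its payload is copied in from the user pointer: a zero-length IB is rejected up front, then length_dw words are allocated and pulled in. A hedged sketch of that validate-then-copy shape (illustrative helper, not the driver's exact code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static int copy_chunk(u32 length_dw, const void __user *udata, u32 **kdata)
{
        size_t size;

        if (length_dw == 0)
                return -EINVAL;         /* zero length IB isn't useful */
        size = (size_t)length_dw * sizeof(u32);
        *kdata = kzalloc(size, GFP_KERNEL);
        if (*kdata == NULL)
                return -ENOMEM;
        if (copy_from_user(*kdata, udata, size)) {
                kfree(*kdata);
                return -EFAULT;
        }
        return 0;
}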


@ -111,9 +111,11 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
else
else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr);
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
}
}
int radeon_crtc_cursor_set(struct drm_crtc *crtc,
@ -245,6 +247,9 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
(RADEON_CUR_LOCK
| ((xorigin ? 0 : x) << 16)
| (yorigin ? 0 : y)));
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
(yorigin * 256)));
}
radeon_lock_cursor(crtc, false);
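
On pre-AVIVO chips CUR_OFFSET is relative to the display base address, and a 64-pixel-wide ARGB cursor occupies 64 * 4 = 256 bytes per line, which is where the `yorigin * 256` adjustment for top-edge clipping comes from. A worked standalone example (addresses are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t gpu_addr = 0x00a00000;         /* cursor BO, GPU address */
        uint32_t disp_base = 0x00800000;        /* DISP_BASE_ADDRESS */
        uint32_t yorigin = 10;                  /* cursor clipped 10 lines at top */

        uint32_t cur_offset = gpu_addr - disp_base;     /* 0x00200000 */
        /* skip the clipped lines: 10 * 256 bytes */
        printf("CUR_OFFSET = 0x%08x\n", cur_offset + yorigin * 256);
        return 0;
}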


@ -48,6 +48,8 @@ static void radeon_surface_init(struct radeon_device *rdev)
i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
0);
}
/* enable surfaces */
WREG32(RADEON_SURFACE_CNTL, 0);
}
}
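
The loop above programs eight surface banks that sit a fixed register stride apart, computed from the SURFACE1_INFO/SURFACE0_INFO offsets. A standalone restatement of that stride pattern (the offset values here are illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint32_t SURFACE0_INFO = 0x0b0c;  /* illustrative offsets */
        const uint32_t SURFACE1_INFO = 0x0b1c;
        const uint32_t stride = SURFACE1_INFO - SURFACE0_INFO;
        uint32_t reg;
        int i;

        assert(stride == 0x10);
        for (i = 0; i < 8; i++) {
                reg = SURFACE0_INFO + (uint32_t)i * stride;
                /* surface 7 lands at 0x0b0c + 7 * 0x10 = 0x0b7c */
                assert(reg <= 0x0b7c);
        }
        return 0;
}
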
@ -119,7 +121,7 @@ int radeon_mc_setup(struct radeon_device *rdev)
if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
/* vram location was already set up; try to put gtt after
* it, if it fits */
tmp = rdev->mc.vram_location + rdev->mc.vram_size;
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
rdev->mc.gtt_location = tmp;
@ -134,13 +136,13 @@ int radeon_mc_setup(struct radeon_device *rdev)
} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
/* gtt location was already set up; try to put vram before
* it, if it fits */
if (rdev->mc.vram_size < rdev->mc.gtt_location) {
if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
rdev->mc.vram_location = 0;
} else {
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
tmp += (rdev->mc.vram_size - 1);
tmp &= ~(rdev->mc.vram_size - 1);
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) {
tmp += (rdev->mc.mc_vram_size - 1);
tmp &= ~(rdev->mc.mc_vram_size - 1);
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
rdev->mc.vram_location = tmp;
} else {
printk(KERN_ERR "[drm] vram too big to fit "
@ -150,12 +152,14 @@ int radeon_mc_setup(struct radeon_device *rdev)
}
} else {
rdev->mc.vram_location = 0;
rdev->mc.gtt_location = rdev->mc.vram_size;
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20);
DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20);
DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
rdev->mc.vram_location,
rdev->mc.vram_location + rdev->mc.vram_size - 1);
rdev->mc.vram_location + rdev->mc.mc_vram_size - 1);
if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size)
DRM_INFO("radeon: VRAM less than aperture workaround enabled\n");
DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
rdev->mc.gtt_location,
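
Rounding the GTT start up to a gtt_size boundary uses the standard power-of-two align-up idiom, `(tmp + a - 1) & ~(a - 1)`. A standalone check with sample sizes (values illustrative; the alignment must be a power of two):

#include <assert.h>
#include <stdint.h>

static uint32_t align_up(uint32_t v, uint32_t a)
{
        return (v + a - 1) & ~(a - 1);
}

int main(void)
{
        uint32_t vram_location = 0;
        uint32_t mc_vram_size = 128u << 20;     /* 128 MiB */
        uint32_t gtt_size = 512u << 20;         /* 512 MiB */

        uint32_t gtt_location = align_up(vram_location + mc_vram_size,
                                         gtt_size);
        assert(gtt_location == 512u << 20);     /* next 512 MiB boundary */
        return 0;
}
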
@ -450,6 +454,7 @@ int radeon_device_init(struct radeon_device *rdev,
uint32_t flags)
{
int r, ret;
int dma_bits;
DRM_INFO("radeon: Initializing kernel modesetting.\n");
rdev->shutdown = false;
@ -492,8 +497,20 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
}
/* Report DMA addressing limitation */
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits (in theory)
* AGP - generally dma32 is safest
* PCI - only dma32
*/
rdev->need_dma32 = false;
if (rdev->flags & RADEON_IS_AGP)
rdev->need_dma32 = true;
if (rdev->flags & RADEON_IS_PCI)
rdev->need_dma32 = true;
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
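
DMA_BIT_MASK(n) yields a mask with the low n bits set, so the chosen dma_bits directly bounds the addresses the device may be handed. A userspace restatement of the macro, checked for the two cases picked above:

#include <assert.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        int need_dma32 = 1;                     /* e.g. AGP or plain PCI */
        int dma_bits = need_dma32 ? 32 : 40;

        assert(DMA_BIT_MASK(32) == 0xffffffffULL);
        assert(DMA_BIT_MASK(40) == 0xffffffffffULL);
        (void)dma_bits;
        return 0;
}
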
@ -546,27 +563,22 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_combios_asic_init(rdev->ddev);
}
}
/* Get vram information */
radeon_vram_info(rdev);
/* Device is severely broken if aper size > vram size,
* for RN50/M6/M7 - Novell bug 204882 ?
*/
if (rdev->mc.vram_size < rdev->mc.aper_size) {
rdev->mc.aper_size = rdev->mc.vram_size;
}
/* Add an MTRR for the VRAM */
rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
MTRR_TYPE_WRCOMB, 1);
DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
rdev->mc.vram_size >> 20,
(unsigned)rdev->mc.aper_size >> 20);
DRM_INFO("RAM width %dbits %cDR\n",
rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
/* Initialize clocks */
r = radeon_clocks_init(rdev);
if (r) {
return r;
}
/* Get vram information */
radeon_vram_info(rdev);
/* Add an MTRR for the VRAM */
rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
MTRR_TYPE_WRCOMB, 1);
DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
rdev->mc.real_vram_size >> 20,
(unsigned)rdev->mc.aper_size >> 20);
DRM_INFO("RAM width %dbits %cDR\n",
rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
/* Initialize memory controller (also test AGP) */
r = radeon_mc_init(rdev);
if (r) {
@ -626,6 +638,9 @@ int radeon_device_init(struct radeon_device *rdev,
if (!ret) {
DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
}
if (radeon_testing) {
radeon_test_moves(rdev);
}
if (radeon_benchmarking) {
radeon_benchmark(rdev);
}


@ -187,6 +187,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
rdev->mode_info.crtcs[index] = radeon_crtc;
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
@ -491,7 +492,11 @@ void radeon_compute_pll(struct radeon_pll *pll,
tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
current_freq = radeon_div(tmp, ref_div * post_div);
error = abs(current_freq - freq);
if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
error = freq - current_freq;
error = error < 0 ? 0xffffffff : error;
} else
error = abs(current_freq - freq);
vco_diff = abs(vco - best_vco);
if ((best_vco == 0 && error < best_error) ||
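
With RADEON_PLL_PREFER_CLOSEST_LOWER set, any candidate above the target frequency is penalized to 0xffffffff so that only lower-or-equal candidates can win. A standalone check of that error metric (units as in the caller; the values are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t pll_error(int32_t freq, int32_t current_freq,
                          int prefer_closest_lower)
{
        if (prefer_closest_lower) {
                int32_t error = freq - current_freq;
                /* candidates above the target are "infinitely" bad */
                return error < 0 ? 0xffffffff : (uint32_t)error;
        }
        return (uint32_t)abs(current_freq - freq);
}

int main(void)
{
        /* target 65000: 64800 beats 65100 when lower-or-equal must win */
        assert(pll_error(65000, 64800, 1) == 200);
        assert(pll_error(65000, 65100, 1) == 0xffffffff);
        assert(pll_error(65000, 65100, 0) == 100);
        return 0;
}
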
@ -657,36 +662,51 @@ void radeon_modeset_fini(struct radeon_device *rdev)
}
}
void radeon_init_disp_bandwidth(struct drm_device *dev)
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_display_mode *modes[2];
int pixel_bytes[2];
struct drm_crtc *crtc;
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_encoder *radeon_encoder;
bool first = true;
pixel_bytes[0] = pixel_bytes[1] = 0;
modes[0] = modes[1] = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && crtc->fb) {
modes[radeon_crtc->crtc_id] = &crtc->mode;
pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (encoder->crtc != crtc)
continue;
if (first) {
radeon_crtc->rmx_type = radeon_encoder->rmx_type;
radeon_crtc->devices = radeon_encoder->devices;
memcpy(&radeon_crtc->native_mode,
&radeon_encoder->native_mode,
sizeof(struct radeon_native_mode));
first = false;
} else {
if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
/* WARNING: Right now this can't happen but
* in the future we need to check that scaling
* is consistent across different encoders
* (i.e. all encoders can work with the same
* scaling).
*/
DRM_ERROR("Scaling not consistent across encoders.\n");
return false;
}
}
}
if (ASIC_IS_AVIVO(rdev)) {
radeon_init_disp_bw_avivo(dev,
modes[0],
pixel_bytes[0],
modes[1],
pixel_bytes[1]);
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
a.full = rfixed_const(crtc->mode.vdisplay);
b.full = rfixed_const(radeon_crtc->native_mode.panel_yres);
radeon_crtc->vsc.full = rfixed_div(a, b);
a.full = rfixed_const(crtc->mode.hdisplay);
b.full = rfixed_const(radeon_crtc->native_mode.panel_xres);
radeon_crtc->hsc.full = rfixed_div(a, b);
} else {
radeon_init_disp_bw_legacy(dev,
modes[0],
pixel_bytes[0],
modes[1],
pixel_bytes[1]);
radeon_crtc->vsc.full = rfixed_const(1);
radeon_crtc->hsc.full = rfixed_const(1);
}
return true;
}
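
vsc/hsc are 20.12 fixed-point ratios of the requested mode size to the panel's native size, built with rfixed_const()/rfixed_div(). A standalone restatement of that math (FIXED_SHIFT and the demo helpers are assumptions mirroring the fixed20_12 helpers, not the driver's code):

#include <assert.h>
#include <stdint.h>

#define FIXED_SHIFT 12                          /* 20.12 format */

static uint32_t rfixed_const_demo(uint32_t v)
{
        return v << FIXED_SHIFT;
}

static uint32_t rfixed_div_demo(uint32_t a, uint32_t b)
{
        return (uint32_t)(((uint64_t)a << FIXED_SHIFT) / b);
}

int main(void)
{
        /* an 800x600 mode scaled onto a 1280x1024 native panel */
        uint32_t vsc = rfixed_div_demo(rfixed_const_demo(600),
                                       rfixed_const_demo(1024));
        assert(vsc == (600u << FIXED_SHIFT) / 1024);    /* ~0.586 in 20.12 */
        return 0;
}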


@ -89,6 +89,7 @@ int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
#endif
@ -117,6 +118,9 @@ module_param_named(gartsize, radeon_gart_size, int, 0600);
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, radeon_benchmarking, int, 0444);
MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, radeon_testing, int, 0444);
MODULE_PARM_DESC(connector_table, "Force connector table");
module_param_named(connector_table, radeon_connector_table, int, 0444);
#endif
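
For reference, this is the shape of a read-only integer module parameter like the new `test` flag above: a hypothetical out-of-tree sketch, not the radeon code itself. With 0444 permissions the value appears under /sys/module/<name>/parameters/ and is set at load time (e.g. modprobe radeon test=1).

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_test;                   /* 0 = off; set at module load */
MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, demo_test, int, 0444); /* world-readable, not writable */

static int __init demo_init(void)
{
        if (demo_test)
                pr_info("demo: test mode requested\n");
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");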

Some files were not shown because too many files have changed in this diff.